index 275a4ea..500a09a 100644
--- a/mm/swap.c
+++ b/mm/swap.c
 #define CREATE_TRACE_POINTS
 #include <trace/events/pagemap.h>
 
-/* How many pages do we try to swap or page in/out together? */
+/* How many pages do we try to swap or page in/out together? As a power of 2 */
 int page_cluster;
+const int page_cluster_max = 31;
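/*
 * Worked example (a hedged aside, not part of this patch): page_cluster
 * stores an exponent, not a page count.  With the usual default of
 * page_cluster == 3, swap readahead works on 1 << 3 = 8 pages at a time;
 * page_cluster_max == 31 simply keeps the shift valid for a 32-bit value.
 * The helper name below is illustrative only.
 */
static inline unsigned long example_swap_readahead_pages(void)
{
	return 1UL << page_cluster;	/* 8 pages with the default of 3 */
}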
 
-/* Protecting only lru_rotate.pvec which requires disabling interrupts */
+/* Protecting only lru_rotate.fbatch which requires disabling interrupts */
 struct lru_rotate {
        local_lock_t lock;
-       struct pagevec pvec;
+       struct folio_batch fbatch;
 };
 static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
        .lock = INIT_LOCAL_LOCK(lock),
 };
 
 /*
- * The following struct pagevec are grouped together because they are protected
+ * The following folio batches are grouped together because they are protected
  * by disabling preemption (and interrupts remain enabled).
  */
-struct lru_pvecs {
+struct cpu_fbatches {
        local_lock_t lock;
-       struct pagevec lru_add;
-       struct pagevec lru_deactivate_file;
-       struct pagevec lru_deactivate;
-       struct pagevec lru_lazyfree;
+       struct folio_batch lru_add;
+       struct folio_batch lru_deactivate_file;
+       struct folio_batch lru_deactivate;
+       struct folio_batch lru_lazyfree;
 #ifdef CONFIG_SMP
-       struct pagevec activate_page;
+       struct folio_batch activate;
 #endif
 };
-static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
+static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
        .lock = INIT_LOCAL_LOCK(lock),
 };
 
+static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
+               unsigned long *flagsp)
+{
+       if (folio_test_lru(folio)) {
+               folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
+               lruvec_del_folio(*lruvecp, folio);
+               __folio_clear_lru_flags(folio);
+       }
+
+       /*
+        * In rare cases, when truncation or holepunching raced with
+        * munlock after VM_LOCKED was cleared, Mlocked may still be
+        * found set here.  This does not indicate a problem, unless
+        * "unevictable_pgs_cleared" appears worryingly large.
+        */
+       if (unlikely(folio_test_mlocked(folio))) {
+               long nr_pages = folio_nr_pages(folio);
+
+               __folio_clear_mlocked(folio);
+               zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
+               count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
+       }
+}
+
 /*
  * This path almost never happens for VM activity - pages are normally freed
- * via pagevecs.  But it gets used by networking - and for compound pages.
+ * in batches.  But it gets used by networking - and for compound pages.
  */
-static void __page_cache_release(struct page *page)
+static void page_cache_release(struct folio *folio)
 {
-       if (PageLRU(page)) {
-               struct folio *folio = page_folio(page);
-               struct lruvec *lruvec;
-               unsigned long flags;
+       struct lruvec *lruvec = NULL;
+       unsigned long flags;
 
-               lruvec = folio_lruvec_lock_irqsave(folio, &flags);
-               del_page_from_lru_list(page, lruvec);
-               __clear_page_lru_flags(page);
+       __page_cache_release(folio, &lruvec, &flags);
+       if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
-       }
-       /* See comment on PageMlocked in release_pages() */
-       if (unlikely(PageMlocked(page))) {
-               int nr_pages = thp_nr_pages(page);
-
-               __ClearPageMlocked(page);
-               mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
-               count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
-       }
 }
 
-static void __put_single_page(struct page *page)
+static void __folio_put_small(struct folio *folio)
 {
-       __page_cache_release(page);
-       mem_cgroup_uncharge(page_folio(page));
-       free_unref_page(page, 0);
+       page_cache_release(folio);
+       mem_cgroup_uncharge(folio);
+       free_unref_page(&folio->page, 0);
 }
 
-static void __put_compound_page(struct page *page)
+static void __folio_put_large(struct folio *folio)
 {
        /*
         * __page_cache_release() is supposed to be called for thp, not for
@@ -114,21 +127,21 @@ static void __put_compound_page(struct page *page)
         * (it's never listed to any LRU lists) and no memcg routines should
         * be called for hugetlb (it has a separate hugetlb_cgroup.)
         */
-       if (!PageHuge(page))
-               __page_cache_release(page);
-       destroy_compound_page(page);
+       if (!folio_test_hugetlb(folio))
+               page_cache_release(folio);
+       destroy_large_folio(folio);
 }
 
-void __put_page(struct page *page)
+void __folio_put(struct folio *folio)
 {
-       if (unlikely(is_zone_device_page(page)))
-               free_zone_device_page(page);
-       else if (unlikely(PageCompound(page)))
-               __put_compound_page(page);
+       if (unlikely(folio_is_zone_device(folio)))
+               free_zone_device_page(&folio->page);
+       else if (unlikely(folio_test_large(folio)))
+               __folio_put_large(folio);
        else
-               __put_single_page(page);
+               __folio_put_small(folio);
 }
-EXPORT_SYMBOL(__put_page);
+EXPORT_SYMBOL(__folio_put);
 
 /**
  * put_pages_list() - release a list of pages
@@ -138,86 +151,106 @@ EXPORT_SYMBOL(__put_page);
  */
 void put_pages_list(struct list_head *pages)
 {
-       struct page *page, *next;
+       struct folio_batch fbatch;
+       struct folio *folio, *next;
 
-       list_for_each_entry_safe(page, next, pages, lru) {
-               if (!put_page_testzero(page)) {
-                       list_del(&page->lru);
+       folio_batch_init(&fbatch);
+       list_for_each_entry_safe(folio, next, pages, lru) {
+               if (!folio_put_testzero(folio))
                        continue;
-               }
-               if (PageHead(page)) {
-                       list_del(&page->lru);
-                       __put_compound_page(page);
+               if (folio_test_large(folio)) {
+                       __folio_put_large(folio);
                        continue;
                }
-               /* Cannot be PageLRU because it's passed to us using the lru */
+               /* LRU flag must be clear because it's passed using the lru */
+               if (folio_batch_add(&fbatch, folio) > 0)
+                       continue;
+               free_unref_folios(&fbatch);
        }
 
-       free_unref_page_list(pages);
+       if (fbatch.nr)
+               free_unref_folios(&fbatch);
        INIT_LIST_HEAD(pages);
 }
 EXPORT_SYMBOL(put_pages_list);
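/*
 * Hedged usage sketch (not part of this patch): a caller that holds one
 * reference on each of a few folios, none of them on an LRU list, can link
 * them through folio->lru and drop all the references in one call.  The
 * function and variable names here are assumptions for illustration.
 */
static void example_put_two_folios(struct folio *a, struct folio *b)
{
	LIST_HEAD(collected);

	/* The lru field is reused as list linkage, so LRU must be clear. */
	list_add(&a->lru, &collected);
	list_add(&b->lru, &collected);

	/* Drops one reference per folio; @collected comes back empty. */
	put_pages_list(&collected);
}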
 
-/*
- * get_kernel_pages() - pin kernel pages in memory
- * @kiov:      An array of struct kvec structures
- * @nr_segs:   number of segments to pin
- * @write:     pinning for read/write, currently ignored
- * @pages:     array that receives pointers to the pages pinned.
- *             Should be at least nr_segs long.
- *
- * Returns number of pages pinned. This may be fewer than the number requested.
- * If nr_segs is 0 or negative, returns 0.  If no pages were pinned, returns 0.
- * Each page returned must be released with a put_page() call when it is
- * finished with.
- */
-int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
-               struct page **pages)
+typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
+
+static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
 {
-       int seg;
+       int was_unevictable = folio_test_clear_unevictable(folio);
+       long nr_pages = folio_nr_pages(folio);
 
-       for (seg = 0; seg < nr_segs; seg++) {
-               if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
-                       return seg;
+       VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 
-               pages[seg] = kmap_to_page(kiov[seg].iov_base);
-               get_page(pages[seg]);
+       /*
+        * Is an smp_mb__after_atomic() still required here, before
+        * folio_evictable() tests the mlocked flag, to rule out the possibility
+        * of stranding an evictable folio on an unevictable LRU?  I think
+        * not, because __munlock_folio() only clears the mlocked flag
+        * while the LRU lock is held.
+        *
+        * (That is not true of __page_cache_release(), and not necessarily
+        * true of folios_put(): but those only clear the mlocked flag after
+        * folio_put_testzero() has excluded any other users of the folio.)
+        */
+       if (folio_evictable(folio)) {
+               if (was_unevictable)
+                       __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
+       } else {
+               folio_clear_active(folio);
+               folio_set_unevictable(folio);
+               /*
+                * folio->mlock_count = !!folio_test_mlocked(folio)?
+                * But that leaves __mlock_folio() in doubt whether another
+                * actor has already counted the mlock or not.  Err on the
+                * safe side, underestimate, let page reclaim fix it, rather
+                * than leaving a page on the unevictable LRU indefinitely.
+                */
+               folio->mlock_count = 0;
+               if (!was_unevictable)
+                       __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
        }
 
-       return seg;
+       lruvec_add_folio(lruvec, folio);
+       trace_mm_lru_insertion(folio);
 }
-EXPORT_SYMBOL_GPL(get_kernel_pages);
 
-static void pagevec_lru_move_fn(struct pagevec *pvec,
-       void (*move_fn)(struct page *page, struct lruvec *lruvec))
+static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 {
        int i;
        struct lruvec *lruvec = NULL;
        unsigned long flags = 0;
 
-       for (i = 0; i < pagevec_count(pvec); i++) {
-               struct page *page = pvec->pages[i];
-               struct folio *folio = page_folio(page);
+       for (i = 0; i < folio_batch_count(fbatch); i++) {
+               struct folio *folio = fbatch->folios[i];
 
-               /* block memcg migration during page moving between lru */
-               if (!TestClearPageLRU(page))
+               /* block memcg migration while the folio moves between lru */
+               if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
                        continue;
 
-               lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
-               (*move_fn)(page, lruvec);
+               folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
+               move_fn(lruvec, folio);
 
-               SetPageLRU(page);
+               folio_set_lru(folio);
        }
+
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
-       release_pages(pvec->pages, pvec->nr);
-       pagevec_reinit(pvec);
+       folios_put(fbatch);
 }
 
-static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
+static void folio_batch_add_and_move(struct folio_batch *fbatch,
+               struct folio *folio, move_fn_t move_fn)
 {
-       struct folio *folio = page_folio(page);
+       if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
+           !lru_cache_disabled())
+               return;
+       folio_batch_move_lru(fbatch, move_fn);
+}
 
+static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
+{
        if (!folio_test_unevictable(folio)) {
                lruvec_del_folio(lruvec, folio);
                folio_clear_active(folio);
@@ -226,18 +259,6 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
        }
 }
 
-/* return true if pagevec needs to drain */
-static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
-{
-       bool ret = false;
-
-       if (!pagevec_add(pvec, page) || PageCompound(page) ||
-                       lru_cache_disabled())
-               ret = true;
-
-       return ret;
-}
-
 /*
  * Writeback is about to end against a folio which has been marked for
  * immediate reclaim.  If it still appears to be reclaimable, move it
@@ -249,20 +270,31 @@ void folio_rotate_reclaimable(struct folio *folio)
 {
        if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
            !folio_test_unevictable(folio) && folio_test_lru(folio)) {
-               struct pagevec *pvec;
+               struct folio_batch *fbatch;
                unsigned long flags;
 
                folio_get(folio);
                local_lock_irqsave(&lru_rotate.lock, flags);
-               pvec = this_cpu_ptr(&lru_rotate.pvec);
-               if (pagevec_add_and_need_flush(pvec, &folio->page))
-                       pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
+               fbatch = this_cpu_ptr(&lru_rotate.fbatch);
+               folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
                local_unlock_irqrestore(&lru_rotate.lock, flags);
        }
 }
 
-void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
+void lru_note_cost(struct lruvec *lruvec, bool file,
+                  unsigned int nr_io, unsigned int nr_rotated)
 {
+       unsigned long cost;
+
+       /*
+        * Reflect the relative cost of incurring IO and spending CPU
+        * time on rotations. This doesn't attempt to make a precise
+        * comparison, it just says: if reloads are about comparable
+        * between the LRU lists, or rotations are overwhelmingly
+        * different between them, adjust scan balance for CPU work.
+        */
+       cost = nr_io * SWAP_CLUSTER_MAX + nr_rotated;
+
        do {
                unsigned long lrusize;
 
@@ -276,9 +308,9 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
                spin_lock_irq(&lruvec->lru_lock);
                /* Record cost event */
                if (file)
-                       lruvec->file_cost += nr_pages;
+                       lruvec->file_cost += cost;
                else
-                       lruvec->anon_cost += nr_pages;
+                       lruvec->anon_cost += cost;
 
                /*
                 * Decay previous events
@@ -301,13 +333,13 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
        } while ((lruvec = parent_lruvec(lruvec)));
 }
 
-void lru_note_cost_folio(struct folio *folio)
+void lru_note_cost_refault(struct folio *folio)
 {
        lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
-                       folio_nr_pages(folio));
+                     folio_nr_pages(folio), 0);
 }
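/*
 * Worked example (hedged, not part of this patch): with SWAP_CLUSTER_MAX
 * at its usual value of 32, one page of refault IO is charged the same
 * cost as 32 in-memory rotations.  So lru_note_cost(lruvec, file, 4, 10)
 * adds 4 * 32 + 10 = 138 to the file or anon cost before the decay step
 * in lru_note_cost() runs.
 */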
 
-static void __folio_activate(struct folio *folio, struct lruvec *lruvec)
+static void folio_activate_fn(struct lruvec *lruvec, struct folio *folio)
 {
        if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
                long nr_pages = folio_nr_pages(folio);
@@ -324,51 +356,40 @@ static void __folio_activate(struct folio *folio, struct lruvec *lruvec)
 }
 
 #ifdef CONFIG_SMP
-static void __activate_page(struct page *page, struct lruvec *lruvec)
-{
-       return __folio_activate(page_folio(page), lruvec);
-}
-
-static void activate_page_drain(int cpu)
+static void folio_activate_drain(int cpu)
 {
-       struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);
-
-       if (pagevec_count(pvec))
-               pagevec_lru_move_fn(pvec, __activate_page);
-}
+       struct folio_batch *fbatch = &per_cpu(cpu_fbatches.activate, cpu);
 
-static bool need_activate_page_drain(int cpu)
-{
-       return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
+       if (folio_batch_count(fbatch))
+               folio_batch_move_lru(fbatch, folio_activate_fn);
 }
 
-static void folio_activate(struct folio *folio)
+void folio_activate(struct folio *folio)
 {
        if (folio_test_lru(folio) && !folio_test_active(folio) &&
            !folio_test_unevictable(folio)) {
-               struct pagevec *pvec;
+               struct folio_batch *fbatch;
 
                folio_get(folio);
-               local_lock(&lru_pvecs.lock);
-               pvec = this_cpu_ptr(&lru_pvecs.activate_page);
-               if (pagevec_add_and_need_flush(pvec, &folio->page))
-                       pagevec_lru_move_fn(pvec, __activate_page);
-               local_unlock(&lru_pvecs.lock);
+               local_lock(&cpu_fbatches.lock);
+               fbatch = this_cpu_ptr(&cpu_fbatches.activate);
+               folio_batch_add_and_move(fbatch, folio, folio_activate_fn);
+               local_unlock(&cpu_fbatches.lock);
        }
 }
 
 #else
-static inline void activate_page_drain(int cpu)
+static inline void folio_activate_drain(int cpu)
 {
 }
 
-static void folio_activate(struct folio *folio)
+void folio_activate(struct folio *folio)
 {
        struct lruvec *lruvec;
 
        if (folio_test_clear_lru(folio)) {
                lruvec = folio_lruvec_lock_irq(folio);
-               __folio_activate(folio, lruvec);
+               folio_activate_fn(lruvec, folio);
                unlock_page_lruvec_irq(lruvec);
                folio_set_lru(folio);
        }
@@ -377,33 +398,67 @@ static void folio_activate(struct folio *folio)
 
 static void __lru_cache_activate_folio(struct folio *folio)
 {
-       struct pagevec *pvec;
+       struct folio_batch *fbatch;
        int i;
 
-       local_lock(&lru_pvecs.lock);
-       pvec = this_cpu_ptr(&lru_pvecs.lru_add);
+       local_lock(&cpu_fbatches.lock);
+       fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
 
        /*
-        * Search backwards on the optimistic assumption that the page being
-        * activated has just been added to this pagevec. Note that only
-        * the local pagevec is examined as a !PageLRU page could be in the
+        * Search backwards on the optimistic assumption that the folio being
+        * activated has just been added to this batch. Note that only
+        * the local batch is examined as a !LRU folio could be in the
         * process of being released, reclaimed, migrated or on a remote
-        * pagevec that is currently being drained. Furthermore, marking
-        * a remote pagevec's page PageActive potentially hits a race where
-        * a page is marked PageActive just after it is added to the inactive
+        * batch that is currently being drained. Furthermore, marking
+        * a remote batch's folio active potentially hits a race where
+        * a folio is marked active just after it is added to the inactive
         * list causing accounting errors and BUG_ON checks to trigger.
         */
-       for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
-               struct page *pagevec_page = pvec->pages[i];
+       for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
+               struct folio *batch_folio = fbatch->folios[i];
 
-               if (pagevec_page == &folio->page) {
+               if (batch_folio == folio) {
                        folio_set_active(folio);
                        break;
                }
        }
 
-       local_unlock(&lru_pvecs.lock);
+       local_unlock(&cpu_fbatches.lock);
+}
+
+#ifdef CONFIG_LRU_GEN
+static void folio_inc_refs(struct folio *folio)
+{
+       unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
+
+       if (folio_test_unevictable(folio))
+               return;
+
+       if (!folio_test_referenced(folio)) {
+               folio_set_referenced(folio);
+               return;
+       }
+
+       if (!folio_test_workingset(folio)) {
+               folio_set_workingset(folio);
+               return;
+       }
+
+       /* see the comment on MAX_NR_TIERS */
+       do {
+               new_flags = old_flags & LRU_REFS_MASK;
+               if (new_flags == LRU_REFS_MASK)
+                       break;
+
+               new_flags += BIT(LRU_REFS_PGOFF);
+               new_flags |= old_flags & ~LRU_REFS_MASK;
+       } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
 }
+#else
+static void folio_inc_refs(struct folio *folio)
+{
+}
+#endif /* CONFIG_LRU_GEN */
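/*
 * Hedged aside (not part of this patch): folio_inc_refs() above uses the
 * standard lockless read-modify-write retry loop.  The same saturating
 * pattern on a plain counter looks like this; the function and parameter
 * names are illustrative only.
 */
static void example_saturating_inc(unsigned long *counter, unsigned long max)
{
	unsigned long next, old = READ_ONCE(*counter);

	do {
		if (old >= max)
			break;		/* already saturated, like LRU_REFS_MASK */
		next = old + 1;
	} while (!try_cmpxchg(counter, &old, next));	/* old is refreshed on failure */
}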
 
 /*
  * Mark a page as having seen activity.
@@ -417,6 +472,11 @@ static void __lru_cache_activate_folio(struct folio *folio)
  */
 void folio_mark_accessed(struct folio *folio)
 {
+       if (lru_gen_enabled()) {
+               folio_inc_refs(folio);
+               return;
+       }
+
        if (!folio_test_referenced(folio)) {
                folio_set_referenced(folio);
        } else if (folio_test_unevictable(folio)) {
@@ -427,9 +487,9 @@ void folio_mark_accessed(struct folio *folio)
                 */
        } else if (!folio_test_active(folio)) {
                /*
-                * If the page is on the LRU, queue it for activation via
-                * lru_pvecs.activate_page. Otherwise, assume the page is on a
-                * pagevec, mark it active and it'll be moved to the active
+                * If the folio is on the LRU, queue it for activation via
+                * cpu_fbatches.activate. Otherwise, assume the folio is in a
+                * folio_batch, mark it active and it'll be moved to the active
                 * LRU on the next drain.
                 */
                if (folio_test_lru(folio))
@@ -450,95 +510,100 @@ EXPORT_SYMBOL(folio_mark_accessed);
  *
  * Queue the folio for addition to the LRU. The decision on whether
  * to add the page to the [in]active [file|anon] list is deferred until the
- * pagevec is drained. This gives a chance for the caller of folio_add_lru()
+ * folio_batch is drained. This gives a chance for the caller of folio_add_lru()
  * to have the folio added to the active list using folio_mark_accessed().
  */
 void folio_add_lru(struct folio *folio)
 {
-       struct pagevec *pvec;
+       struct folio_batch *fbatch;
 
-       VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);
+       VM_BUG_ON_FOLIO(folio_test_active(folio) &&
+                       folio_test_unevictable(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 
+       /* see the comment in lru_gen_add_folio() */
+       if (lru_gen_enabled() && !folio_test_unevictable(folio) &&
+           lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
+               folio_set_active(folio);
+
        folio_get(folio);
-       local_lock(&lru_pvecs.lock);
-       pvec = this_cpu_ptr(&lru_pvecs.lru_add);
-       if (pagevec_add_and_need_flush(pvec, &folio->page))
-               __pagevec_lru_add(pvec);
-       local_unlock(&lru_pvecs.lock);
+       local_lock(&cpu_fbatches.lock);
+       fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
+       folio_batch_add_and_move(fbatch, folio, lru_add_fn);
+       local_unlock(&cpu_fbatches.lock);
 }
 EXPORT_SYMBOL(folio_add_lru);
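/*
 * Hedged usage sketch (not part of this patch): a typical caller has just
 * created a folio, still holds its own reference, and publishes it to the
 * LRU.  folio_add_lru() takes an extra reference for the per-CPU batch, so
 * the caller's reference is untouched.  The function name below is an
 * assumption for illustration.
 */
static void example_publish_new_folio(struct folio *folio)
{
	/* Newly instantiated, not yet visible to reclaim. */
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	folio_add_lru(folio);
	/* The folio now sits in this CPU's lru_add batch until a drain. */
}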
 
 /**
- * lru_cache_add_inactive_or_unevictable
- * @page:  the page to be added to LRU
- * @vma:   vma in which page is mapped for determining reclaimability
+ * folio_add_lru_vma() - Add a folio to the appropriate LRU list for this VMA.
+ * @folio: The folio to be added to the LRU.
+ * @vma: VMA in which the folio is mapped.
  *
- * Place @page on the inactive or unevictable LRU list, depending on its
- * evictability.
+ * If the VMA is mlocked, @folio is added to the unevictable list.
+ * Otherwise, it is treated the same way as folio_add_lru().
  */
-void lru_cache_add_inactive_or_unevictable(struct page *page,
-                                        struct vm_area_struct *vma)
+void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
 {
-       VM_BUG_ON_PAGE(PageLRU(page), page);
+       VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 
        if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
-               mlock_new_page(page);
+               mlock_new_folio(folio);
        else
-               lru_cache_add(page);
+               folio_add_lru(folio);
 }
 
 /*
- * If the page can not be invalidated, it is moved to the
+ * If the folio cannot be invalidated, it is moved to the
  * inactive list to speed up its reclaim.  It is moved to the
  * head of the list, rather than the tail, to give the flusher
  * threads some time to write it out, as this is much more
  * effective than the single-page writeout from reclaim.
  *
- * If the page isn't page_mapped and dirty/writeback, the page
- * could reclaim asap using PG_reclaim.
+ * If the folio isn't mapped and dirty/writeback, the folio
+ * could be reclaimed asap using the reclaim flag.
  *
- * 1. active, mapped page -> none
- * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
- * 3. inactive, mapped page -> none
- * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
+ * 1. active, mapped folio -> none
+ * 2. active, dirty/writeback folio -> inactive, head, reclaim
+ * 3. inactive, mapped folio -> none
+ * 4. inactive, dirty/writeback folio -> inactive, head, reclaim
  * 5. inactive, clean -> inactive, tail
  * 6. Others -> none
  *
- * In 4, why it moves inactive's head, the VM expects the page would
- * be write it out by flusher threads as this is much more effective
+ * In 4, it moves to the head of the inactive list so the folio is
+ * written out by flusher threads as this is much more efficient
  * than the single-page writeout from reclaim.
  */
-static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
+static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
 {
-       bool active = PageActive(page);
-       int nr_pages = thp_nr_pages(page);
+       bool active = folio_test_active(folio);
+       long nr_pages = folio_nr_pages(folio);
 
-       if (PageUnevictable(page))
+       if (folio_test_unevictable(folio))
                return;
 
-       /* Some processes are using the page */
-       if (page_mapped(page))
+       /* Some processes are using the folio */
+       if (folio_mapped(folio))
                return;
 
-       del_page_from_lru_list(page, lruvec);
-       ClearPageActive(page);
-       ClearPageReferenced(page);
+       lruvec_del_folio(lruvec, folio);
+       folio_clear_active(folio);
+       folio_clear_referenced(folio);
 
-       if (PageWriteback(page) || PageDirty(page)) {
+       if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
                /*
-                * PG_reclaim could be raced with end_page_writeback
-                * It can make readahead confusing.  But race window
-                * is _really_ small and  it's non-critical problem.
+                * Setting the reclaim flag could race with
+                * folio_end_writeback() and confuse readahead.  But the
+                * race window is _really_ small and  it's not a critical
+                * problem.
                 */
-               add_page_to_lru_list(page, lruvec);
-               SetPageReclaim(page);
+               lruvec_add_folio(lruvec, folio);
+               folio_set_reclaim(folio);
        } else {
                /*
-                * The page's writeback ends up during pagevec
-                * We move that page into tail of inactive.
+                * The folio's writeback ended while it was in the batch.
+                * We move that folio to the tail of the inactive list.
                 */
-               add_page_to_lru_list_tail(page, lruvec);
+               lruvec_add_folio_tail(lruvec, folio);
                __count_vm_events(PGROTATED, nr_pages);
        }
 
@@ -549,15 +614,15 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
        }
 }
 
-static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
+static void lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio)
 {
-       if (PageActive(page) && !PageUnevictable(page)) {
-               int nr_pages = thp_nr_pages(page);
+       if (!folio_test_unevictable(folio) && (folio_test_active(folio) || lru_gen_enabled())) {
+               long nr_pages = folio_nr_pages(folio);
 
-               del_page_from_lru_list(page, lruvec);
-               ClearPageActive(page);
-               ClearPageReferenced(page);
-               add_page_to_lru_list(page, lruvec);
+               lruvec_del_folio(lruvec, folio);
+               folio_clear_active(folio);
+               folio_clear_referenced(folio);
+               lruvec_add_folio(lruvec, folio);
 
                __count_vm_events(PGDEACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
@@ -565,22 +630,22 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
        }
 }
 
-static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
+static void lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio)
 {
-       if (PageAnon(page) && PageSwapBacked(page) &&
-           !PageSwapCache(page) && !PageUnevictable(page)) {
-               int nr_pages = thp_nr_pages(page);
+       if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+           !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
+               long nr_pages = folio_nr_pages(folio);
 
-               del_page_from_lru_list(page, lruvec);
-               ClearPageActive(page);
-               ClearPageReferenced(page);
+               lruvec_del_folio(lruvec, folio);
+               folio_clear_active(folio);
+               folio_clear_referenced(folio);
                /*
-                * Lazyfree pages are clean anonymous pages.  They have
-                * PG_swapbacked flag cleared, to distinguish them from normal
-                * anonymous pages
+                * Lazyfree folios are clean anonymous folios.  They have
+                * the swapbacked flag cleared, to distinguish them from normal
+                * anonymous folios
                 */
-               ClearPageSwapBacked(page);
-               add_page_to_lru_list(page, lruvec);
+               folio_clear_swapbacked(folio);
+               lruvec_add_folio(lruvec, folio);
 
                __count_vm_events(PGLAZYFREE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
@@ -589,123 +654,119 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
 }
 
 /*
- * Drain pages out of the cpu's pagevecs.
+ * Drain pages out of the cpu's folio_batch.
  * Either "cpu" is the current CPU, and preemption has already been
  * disabled; or "cpu" is being hot-unplugged, and is already dead.
  */
 void lru_add_drain_cpu(int cpu)
 {
-       struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);
+       struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
+       struct folio_batch *fbatch = &fbatches->lru_add;
 
-       if (pagevec_count(pvec))
-               __pagevec_lru_add(pvec);
+       if (folio_batch_count(fbatch))
+               folio_batch_move_lru(fbatch, lru_add_fn);
 
-       pvec = &per_cpu(lru_rotate.pvec, cpu);
+       fbatch = &per_cpu(lru_rotate.fbatch, cpu);
        /* Disabling interrupts below acts as a compiler barrier. */
-       if (data_race(pagevec_count(pvec))) {
+       if (data_race(folio_batch_count(fbatch))) {
                unsigned long flags;
 
                /* No harm done if a racing interrupt already did this */
                local_lock_irqsave(&lru_rotate.lock, flags);
-               pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
+               folio_batch_move_lru(fbatch, lru_move_tail_fn);
                local_unlock_irqrestore(&lru_rotate.lock, flags);
        }
 
-       pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
-       if (pagevec_count(pvec))
-               pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
+       fbatch = &fbatches->lru_deactivate_file;
+       if (folio_batch_count(fbatch))
+               folio_batch_move_lru(fbatch, lru_deactivate_file_fn);
 
-       pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
-       if (pagevec_count(pvec))
-               pagevec_lru_move_fn(pvec, lru_deactivate_fn);
+       fbatch = &fbatches->lru_deactivate;
+       if (folio_batch_count(fbatch))
+               folio_batch_move_lru(fbatch, lru_deactivate_fn);
 
-       pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
-       if (pagevec_count(pvec))
-               pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
+       fbatch = &fbatches->lru_lazyfree;
+       if (folio_batch_count(fbatch))
+               folio_batch_move_lru(fbatch, lru_lazyfree_fn);
 
-       activate_page_drain(cpu);
+       folio_activate_drain(cpu);
 }
 
 /**
- * deactivate_file_folio() - Forcefully deactivate a file folio.
+ * deactivate_file_folio() - Deactivate a file folio.
  * @folio: Folio to deactivate.
  *
  * This function hints to the VM that @folio is a good reclaim candidate,
  * for example if its invalidation fails due to the folio being dirty
  * or under writeback.
  *
- * Context: Caller holds a reference on the page.
+ * Context: Caller holds a reference on the folio.
  */
 void deactivate_file_folio(struct folio *folio)
 {
-       struct pagevec *pvec;
+       struct folio_batch *fbatch;
 
-       /*
-        * In a workload with many unevictable pages such as mprotect,
-        * unevictable folio deactivation for accelerating reclaim is pointless.
-        */
+       /* Deactivating an unevictable folio will not accelerate reclaim */
        if (folio_test_unevictable(folio))
                return;
 
        folio_get(folio);
-       local_lock(&lru_pvecs.lock);
-       pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
-
-       if (pagevec_add_and_need_flush(pvec, &folio->page))
-               pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
-       local_unlock(&lru_pvecs.lock);
+       local_lock(&cpu_fbatches.lock);
+       fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
+       folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
+       local_unlock(&cpu_fbatches.lock);
 }
 
 /*
- * deactivate_page - deactivate a page
- * @page: page to deactivate
+ * folio_deactivate - deactivate a folio
+ * @folio: folio to deactivate
  *
- * deactivate_page() moves @page to the inactive list if @page was on the active
- * list and was not an unevictable page.  This is done to accelerate the reclaim
- * of @page.
+ * folio_deactivate() moves @folio to the inactive list if @folio was on the
+ * active list and was not unevictable. This is done to accelerate the
+ * reclaim of @folio.
  */
-void deactivate_page(struct page *page)
+void folio_deactivate(struct folio *folio)
 {
-       if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
-               struct pagevec *pvec;
-
-               local_lock(&lru_pvecs.lock);
-               pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
-               get_page(page);
-               if (pagevec_add_and_need_flush(pvec, page))
-                       pagevec_lru_move_fn(pvec, lru_deactivate_fn);
-               local_unlock(&lru_pvecs.lock);
+       if (folio_test_lru(folio) && !folio_test_unevictable(folio) &&
+           (folio_test_active(folio) || lru_gen_enabled())) {
+               struct folio_batch *fbatch;
+
+               folio_get(folio);
+               local_lock(&cpu_fbatches.lock);
+               fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
+               folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
+               local_unlock(&cpu_fbatches.lock);
        }
 }
 
 /**
- * mark_page_lazyfree - make an anon page lazyfree
- * @page: page to deactivate
+ * folio_mark_lazyfree - make an anon folio lazyfree
+ * @folio: folio to deactivate
  *
- * mark_page_lazyfree() moves @page to the inactive file list.
- * This is done to accelerate the reclaim of @page.
+ * folio_mark_lazyfree() moves @folio to the inactive file list.
+ * This is done to accelerate the reclaim of @folio.
  */
-void mark_page_lazyfree(struct page *page)
+void folio_mark_lazyfree(struct folio *folio)
 {
-       if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
-           !PageSwapCache(page) && !PageUnevictable(page)) {
-               struct pagevec *pvec;
-
-               local_lock(&lru_pvecs.lock);
-               pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
-               get_page(page);
-               if (pagevec_add_and_need_flush(pvec, page))
-                       pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
-               local_unlock(&lru_pvecs.lock);
+       if (folio_test_lru(folio) && folio_test_anon(folio) &&
+           folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
+           !folio_test_unevictable(folio)) {
+               struct folio_batch *fbatch;
+
+               folio_get(folio);
+               local_lock(&cpu_fbatches.lock);
+               fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
+               folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
+               local_unlock(&cpu_fbatches.lock);
        }
 }
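/*
 * Hedged usage sketch (not part of this patch): the MADV_FREE path is the
 * natural caller.  For a swap-backed anonymous folio that is still on the
 * LRU, clearing the dirty state and marking it lazyfree lets reclaim
 * discard it later without writing it to swap.  The helper name is an
 * assumption for illustration.
 */
static void example_madv_free_one(struct folio *folio)
{
	if (folio_test_dirty(folio))
		folio_clear_dirty(folio);
	folio_mark_lazyfree(folio);
}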
 
 void lru_add_drain(void)
 {
-       local_lock(&lru_pvecs.lock);
+       local_lock(&cpu_fbatches.lock);
        lru_add_drain_cpu(smp_processor_id());
-       local_unlock(&lru_pvecs.lock);
-       mlock_page_drain_local();
+       local_unlock(&cpu_fbatches.lock);
+       mlock_drain_local();
 }
 
 /*
@@ -716,20 +777,20 @@ void lru_add_drain(void)
  */
 static void lru_add_and_bh_lrus_drain(void)
 {
-       local_lock(&lru_pvecs.lock);
+       local_lock(&cpu_fbatches.lock);
        lru_add_drain_cpu(smp_processor_id());
-       local_unlock(&lru_pvecs.lock);
+       local_unlock(&cpu_fbatches.lock);
        invalidate_bh_lrus_cpu();
-       mlock_page_drain_local();
+       mlock_drain_local();
 }
 
 void lru_add_drain_cpu_zone(struct zone *zone)
 {
-       local_lock(&lru_pvecs.lock);
+       local_lock(&cpu_fbatches.lock);
        lru_add_drain_cpu(smp_processor_id());
        drain_local_pages(zone);
-       local_unlock(&lru_pvecs.lock);
-       mlock_page_drain_local();
+       local_unlock(&cpu_fbatches.lock);
+       mlock_drain_local();
 }
 
 #ifdef CONFIG_SMP
@@ -741,6 +802,21 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
        lru_add_and_bh_lrus_drain();
 }
 
+static bool cpu_needs_drain(unsigned int cpu)
+{
+       struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
+
+       /* Check these in order of likelihood that they're not zero */
+       return folio_batch_count(&fbatches->lru_add) ||
+               data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
+               folio_batch_count(&fbatches->lru_deactivate_file) ||
+               folio_batch_count(&fbatches->lru_deactivate) ||
+               folio_batch_count(&fbatches->lru_lazyfree) ||
+               folio_batch_count(&fbatches->activate) ||
+               need_mlock_drain(cpu) ||
+               has_bh_in_lru(cpu, NULL);
+}
+
 /*
  * Doesn't need any cpu hotplug locking because we do rely on per-cpu
  * kworkers being shut down before our page_alloc_cpu_dead callback is
@@ -773,8 +849,9 @@ static inline void __lru_add_drain_all(bool force_all_cpus)
                return;
 
        /*
-        * Guarantee pagevec counter stores visible by this CPU are visible to
-        * other CPUs before loading the current drain generation.
+        * Guarantee folio_batch counter stores visible by this CPU
+        * are visible to other CPUs before loading the current drain
+        * generation.
         */
        smp_mb();
 
@@ -800,8 +877,9 @@ static inline void __lru_add_drain_all(bool force_all_cpus)
         * (D) Increment global generation number
         *
         * Pairs with smp_load_acquire() at (B), outside of the critical
-        * section. Use a full memory barrier to guarantee that the new global
-        * drain generation number is stored before loading pagevec counters.
+        * section. Use a full memory barrier to guarantee that the
+        * new global drain generation number is stored before loading
+        * folio_batch counters.
         *
         * This pairing must be done here, before the for_each_online_cpu loop
         * below which drains the page vectors.
@@ -823,14 +901,7 @@ static inline void __lru_add_drain_all(bool force_all_cpus)
        for_each_online_cpu(cpu) {
                struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 
-               if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
-                   data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
-                   pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
-                   pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
-                   pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
-                   need_activate_page_drain(cpu) ||
-                   need_mlock_page_drain(cpu) ||
-                   has_bh_in_lru(cpu, NULL)) {
+               if (cpu_needs_drain(cpu)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
                        queue_work_on(cpu, mm_percpu_wq, work);
                        __cpumask_set_cpu(cpu, &has_work);
@@ -890,180 +961,139 @@ void lru_cache_disable(void)
 }
 
 /**
- * release_pages - batched put_page()
- * @pages: array of pages to release
- * @nr: number of pages
+ * folios_put_refs - Reduce the reference count on a batch of folios.
+ * @folios: The folios.
+ * @refs: The number of refs to subtract from each folio.
  *
- * Decrement the reference count on all the pages in @pages.  If it
- * fell to zero, remove the page from the LRU and free it.
+ * Like folio_put(), but for a batch of folios.  This is more efficient
+ * than writing the loop yourself as it will optimise the locks which need
+ * to be taken if the folios are freed.  The folios batch is returned
+ * empty and ready to be reused for another batch; there is no need
+ * to reinitialise it.  If @refs is NULL, we subtract one from each
+ * folio refcount.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context.  May be called while holding a spinlock.
  */
-void release_pages(struct page **pages, int nr)
+void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 {
-       int i;
-       LIST_HEAD(pages_to_free);
+       int i, j;
        struct lruvec *lruvec = NULL;
        unsigned long flags = 0;
-       unsigned int lock_batch;
-
-       for (i = 0; i < nr; i++) {
-               struct page *page = pages[i];
-               struct folio *folio = page_folio(page);
 
-               /*
-                * Make sure the IRQ-safe lock-holding time does not get
-                * excessive with a continuous string of pages from the
-                * same lruvec. The lock is held only if lruvec != NULL.
-                */
-               if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) {
-                       unlock_page_lruvec_irqrestore(lruvec, flags);
-                       lruvec = NULL;
-               }
+       for (i = 0, j = 0; i < folios->nr; i++) {
+               struct folio *folio = folios->folios[i];
+               unsigned int nr_refs = refs ? refs[i] : 1;
 
-               page = &folio->page;
-               if (is_huge_zero_page(page))
+               if (is_huge_zero_page(&folio->page))
                        continue;
 
-               if (is_zone_device_page(page)) {
+               if (folio_is_zone_device(folio)) {
                        if (lruvec) {
                                unlock_page_lruvec_irqrestore(lruvec, flags);
                                lruvec = NULL;
                        }
-                       if (put_devmap_managed_page(page))
+                       if (put_devmap_managed_page_refs(&folio->page, nr_refs))
                                continue;
-                       if (put_page_testzero(page))
-                               free_zone_device_page(page);
+                       if (folio_ref_sub_and_test(folio, nr_refs))
+                               free_zone_device_page(&folio->page);
                        continue;
                }
 
-               if (!put_page_testzero(page))
+               if (!folio_ref_sub_and_test(folio, nr_refs))
                        continue;
 
-               if (PageCompound(page)) {
+               /* hugetlb has its own memcg */
+               if (folio_test_hugetlb(folio)) {
                        if (lruvec) {
                                unlock_page_lruvec_irqrestore(lruvec, flags);
                                lruvec = NULL;
                        }
-                       __put_compound_page(page);
+                       free_huge_folio(folio);
                        continue;
                }
+               if (folio_test_large(folio) &&
+                   folio_test_large_rmappable(folio))
+                       folio_undo_large_rmappable(folio);
 
-               if (PageLRU(page)) {
-                       struct lruvec *prev_lruvec = lruvec;
+               __page_cache_release(folio, &lruvec, &flags);
 
-                       lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
-                                                                       &flags);
-                       if (prev_lruvec != lruvec)
-                               lock_batch = 0;
-
-                       del_page_from_lru_list(page, lruvec);
-                       __clear_page_lru_flags(page);
-               }
-
-               /*
-                * In rare cases, when truncation or holepunching raced with
-                * munlock after VM_LOCKED was cleared, Mlocked may still be
-                * found set here.  This does not indicate a problem, unless
-                * "unevictable_pgs_cleared" appears worryingly large.
-                */
-               if (unlikely(PageMlocked(page))) {
-                       __ClearPageMlocked(page);
-                       dec_zone_page_state(page, NR_MLOCK);
-                       count_vm_event(UNEVICTABLE_PGCLEARED);
-               }
-
-               list_add(&page->lru, &pages_to_free);
+               if (j != i)
+                       folios->folios[j] = folio;
+               j++;
        }
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
+       if (!j) {
+               folio_batch_reinit(folios);
+               return;
+       }
 
-       mem_cgroup_uncharge_list(&pages_to_free);
-       free_unref_page_list(&pages_to_free);
+       folios->nr = j;
+       mem_cgroup_uncharge_folios(folios);
+       free_unref_folios(folios);
 }
-EXPORT_SYMBOL(release_pages);
+EXPORT_SYMBOL(folios_put_refs);
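/*
 * Hedged usage sketch (not part of this patch): dropping a single
 * reference on each folio in an array.  Passing NULL for @refs is the
 * common case and is equivalent to folio_put() on every entry, just with
 * batched LRU locking.  Names below are illustrative only.
 */
static void example_put_folio_array(struct folio **folios, unsigned int nr)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	for (i = 0; i < nr; i++) {
		if (folio_batch_add(&fbatch, folios[i]) > 0)
			continue;
		folios_put_refs(&fbatch, NULL);	/* batch full: flush it */
	}
	if (folio_batch_count(&fbatch))
		folios_put_refs(&fbatch, NULL);
}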
 
-/*
- * The pages which we're about to release may be in the deferred lru-addition
- * queues.  That would prevent them from really being freed right now.  That's
- * OK from a correctness point of view but is inefficient - those pages may be
- * cache-warm and we want to give them back to the page allocator ASAP.
+/**
+ * release_pages - batched put_page()
+ * @arg: array of pages to release
+ * @nr: number of pages
  *
- * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
- * and __pagevec_lru_add_active() call release_pages() directly to avoid
- * mutual recursion.
+ * Decrement the reference count on all the pages in @arg.  If it
+ * fell to zero, remove the page from the LRU and free it.
+ *
+ * Note that the argument can be an array of pages, encoded pages,
+ * or folio pointers. We ignore any encoded bits, and turn any of
+ * them into just a folio that gets free'd.
  */
-void __pagevec_release(struct pagevec *pvec)
+void release_pages(release_pages_arg arg, int nr)
 {
-       if (!pvec->percpu_pvec_drained) {
-               lru_add_drain();
-               pvec->percpu_pvec_drained = true;
-       }
-       release_pages(pvec->pages, pagevec_count(pvec));
-       pagevec_reinit(pvec);
-}
-EXPORT_SYMBOL(__pagevec_release);
+       struct folio_batch fbatch;
+       int refs[PAGEVEC_SIZE];
+       struct encoded_page **encoded = arg.encoded_pages;
+       int i;
 
-static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
-{
-       int was_unevictable = folio_test_clear_unevictable(folio);
-       long nr_pages = folio_nr_pages(folio);
+       folio_batch_init(&fbatch);
+       for (i = 0; i < nr; i++) {
+               /* Turn any of the argument types into a folio */
+               struct folio *folio = page_folio(encoded_page_ptr(encoded[i]));
 
-       VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+               /* Is our next entry actually "nr_pages" -> "nr_refs" ? */
+               refs[fbatch.nr] = 1;
+               if (unlikely(encoded_page_flags(encoded[i]) &
+                            ENCODED_PAGE_BIT_NR_PAGES_NEXT))
+                       refs[fbatch.nr] = encoded_nr_pages(encoded[++i]);
 
-       folio_set_lru(folio);
-       /*
-        * Is an smp_mb__after_atomic() still required here, before
-        * folio_evictable() tests PageMlocked, to rule out the possibility
-        * of stranding an evictable folio on an unevictable LRU?  I think
-        * not, because __munlock_page() only clears PageMlocked while the LRU
-        * lock is held.
-        *
-        * (That is not true of __page_cache_release(), and not necessarily
-        * true of release_pages(): but those only clear PageMlocked after
-        * put_page_testzero() has excluded any other users of the page.)
-        */
-       if (folio_evictable(folio)) {
-               if (was_unevictable)
-                       __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
-       } else {
-               folio_clear_active(folio);
-               folio_set_unevictable(folio);
-               /*
-                * folio->mlock_count = !!folio_test_mlocked(folio)?
-                * But that leaves __mlock_page() in doubt whether another
-                * actor has already counted the mlock or not.  Err on the
-                * safe side, underestimate, let page reclaim fix it, rather
-                * than leaving a page on the unevictable LRU indefinitely.
-                */
-               folio->mlock_count = 0;
-               if (!was_unevictable)
-                       __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
+               if (folio_batch_add(&fbatch, folio) > 0)
+                       continue;
+               folios_put_refs(&fbatch, refs);
        }
 
-       lruvec_add_folio(lruvec, folio);
-       trace_mm_lru_insertion(folio);
+       if (fbatch.nr)
+               folios_put_refs(&fbatch, refs);
 }
+EXPORT_SYMBOL(release_pages);
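/*
 * Hedged usage sketch (not part of this patch): thanks to the transparent
 * union argument, callers keep passing a plain array of page (or folio)
 * pointers.  The get-many, release-once pattern below is an illustration;
 * the function name is an assumption.
 */
static void example_get_then_release(struct page **pages, int nr)
{
	int i;

	/* Take one extra reference on each page ... */
	for (i = 0; i < nr; i++)
		get_page(pages[i]);

	/* ... use the pages, then drop all those references in one call. */
	release_pages(pages, nr);
}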
 
 /*
- * Add the passed pages to the LRU, then drop the caller's refcount
- * on them.  Reinitialises the caller's pagevec.
+ * The folios which we're about to release may be in the deferred lru-addition
+ * queues.  That would prevent them from really being freed right now.  That's
+ * OK from a correctness point of view but is inefficient - those folios may be
+ * cache-warm and we want to give them back to the page allocator ASAP.
+ *
+ * So __folio_batch_release() will drain those queues here.
+ * folio_batch_move_lru() calls folios_put() directly to avoid
+ * mutual recursion.
  */
-void __pagevec_lru_add(struct pagevec *pvec)
+void __folio_batch_release(struct folio_batch *fbatch)
 {
-       int i;
-       struct lruvec *lruvec = NULL;
-       unsigned long flags = 0;
-
-       for (i = 0; i < pagevec_count(pvec); i++) {
-               struct folio *folio = page_folio(pvec->pages[i]);
-
-               lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
-               __pagevec_lru_add_fn(folio, lruvec);
+       if (!fbatch->percpu_pvec_drained) {
+               lru_add_drain();
+               fbatch->percpu_pvec_drained = true;
        }
-       if (lruvec)
-               unlock_page_lruvec_irqrestore(lruvec, flags);
-       release_pages(pvec->pages, pvec->nr);
-       pagevec_reinit(pvec);
+       folios_put(fbatch);
 }
+EXPORT_SYMBOL(__folio_batch_release);
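/*
 * Hedged usage sketch (not part of this patch): the common lookup loop in
 * filesystems.  filemap_get_folios() fills the batch with referenced
 * folios, and folio_batch_release() (the inline wrapper around the
 * function above) drops those references, draining the deferred
 * lru-addition queues first when needed.  Names are illustrative only.
 */
static void example_walk_mapping(struct address_space *mapping, pgoff_t end)
{
	struct folio_batch fbatch;
	pgoff_t index = 0;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &index, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_mark_accessed(folio);	/* example per-folio work */
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}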
 
 /**
  * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
@@ -1086,16 +1116,6 @@ void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
        fbatch->nr = j;
 }
 
-unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
-               struct address_space *mapping, pgoff_t *index, pgoff_t end,
-               xa_mark_t tag)
-{
-       pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
-                                       PAGEVEC_SIZE, pvec->pages);
-       return pagevec_count(pvec);
-}
-EXPORT_SYMBOL(pagevec_lookup_range_tag);
-
 /*
  * Perform any setup for the swap system
  */