typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
+static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
+{
+	int was_unevictable = folio_test_clear_unevictable(folio);
+	long nr_pages = folio_nr_pages(folio);
+
+	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+
+	folio_set_lru(folio);
+	/*
+	 * Is an smp_mb__after_atomic() still required here, before
+	 * folio_evictable() tests PageMlocked, to rule out the possibility
+	 * of stranding an evictable folio on an unevictable LRU? I think
+	 * not, because __munlock_page() only clears PageMlocked while the LRU
+	 * lock is held.
+	 *
+	 * (That is not true of __page_cache_release(), and not necessarily
+	 * true of release_pages(): but those only clear PageMlocked after
+	 * put_page_testzero() has excluded any other users of the page.)
+	 */
+	if (folio_evictable(folio)) {
+		if (was_unevictable)
+			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
+	} else {
+		folio_clear_active(folio);
+		folio_set_unevictable(folio);
+		/*
+		 * folio->mlock_count = !!folio_test_mlocked(folio)?
+		 * But that leaves __mlock_page() in doubt whether another
+		 * actor has already counted the mlock or not. Err on the
+		 * safe side, underestimate, let page reclaim fix it, rather
+		 * than leaving a page on the unevictable LRU indefinitely.
+		 */
+		folio->mlock_count = 0;
+		if (!was_unevictable)
+			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
+	}
+
+	lruvec_add_folio(lruvec, folio);
+	trace_mm_lru_insertion(folio);
+}
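
Note the argument order here: the move_fn_t typedef at the top of this hunk takes
(lruvec, folio), while __pagevec_lru_add_fn() still takes (folio, lruvec). As it
stands, the helper therefore cannot be passed to folio_batch_move_lru(); a later
conversion would have to swap its parameters or go through a trivial adapter. A
minimal sketch of such an adapter, assuming nothing beyond the declarations
visible in this patch (the name lru_add_fn is hypothetical, not part of this
change):

	/* Hypothetical adapter matching move_fn_t; sketch only. */
	static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
	{
		__pagevec_lru_add_fn(folio, lruvec);
	}

Alternatively, the conversion could simply swap __pagevec_lru_add_fn()'s own
parameters to match the typedef.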
+
+/*
+ * Add the passed pages to the LRU, then drop the caller's refcount
+ * on them. Reinitialises the caller's pagevec.
+ */
+static void __pagevec_lru_add(struct pagevec *pvec)
+{
+	int i;
+	struct lruvec *lruvec = NULL;
+	unsigned long flags = 0;
+
+	for (i = 0; i < pagevec_count(pvec); i++) {
+		struct folio *folio = page_folio(pvec->pages[i]);
+
+		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
+		__pagevec_lru_add_fn(folio, lruvec);
+	}
+	if (lruvec)
+		unlock_page_lruvec_irqrestore(lruvec, flags);
+	release_pages(pvec->pages, pvec->nr);
+	pagevec_reinit(pvec);
+}
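
__pagevec_lru_add() is the drain half of a batching scheme: callers accumulate
page references in a per-CPU pagevec and flush the whole vector at once, so the
lruvec lock is taken once per batch (folio_lruvec_relock_irqsave() only drops
and re-takes it when consecutive folios belong to different lruvecs) rather
than once per page. A rough sketch of the filling half, modelled on the
lru_cache_add() of this era; the lru_pvecs field names are an assumption about
the surrounding file, not shown in this patch:

	void lru_cache_add(struct page *page)
	{
		struct pagevec *pvec;

		/* Reference handed to the LRU; dropped by release_pages() in the drain. */
		get_page(page);
		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_add);
		if (pagevec_add_and_need_flush(pvec, page))
			__pagevec_lru_add(pvec);
		local_unlock(&lru_pvecs.lock);
	}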
+
static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
{
 	int i;
}
EXPORT_SYMBOL(__pagevec_release);
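
The context lines just above show only the head of folio_batch_move_lru(); its
body (and the tail of __pagevec_release(), whose EXPORT_SYMBOL follows) falls
between hunks. For orientation, folio_batch_move_lru() follows the same
lock-batching shape as __pagevec_lru_add() above, but works on a folio_batch
and takes the operation as a move_fn_t callback. The following is a
reconstruction from that pattern, not the verbatim upstream body; in
particular, the real function also manipulates the folio's LRU flag around the
callback to fence off concurrent users, which this sketch omits:

	static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
	{
		int i;
		struct lruvec *lruvec = NULL;
		unsigned long flags = 0;

		for (i = 0; i < folio_batch_count(fbatch); i++) {
			struct folio *folio = fbatch->folios[i];

			lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
			move_fn(lruvec, folio);
		}
		if (lruvec)
			unlock_page_lruvec_irqrestore(lruvec, flags);
		folios_put(fbatch->folios, folio_batch_count(fbatch));
		folio_batch_init(fbatch);
	}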
-static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
-{
-	int was_unevictable = folio_test_clear_unevictable(folio);
-	long nr_pages = folio_nr_pages(folio);
-
-	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
-
-	folio_set_lru(folio);
-	/*
-	 * Is an smp_mb__after_atomic() still required here, before
-	 * folio_evictable() tests PageMlocked, to rule out the possibility
-	 * of stranding an evictable folio on an unevictable LRU? I think
-	 * not, because __munlock_page() only clears PageMlocked while the LRU
-	 * lock is held.
-	 *
-	 * (That is not true of __page_cache_release(), and not necessarily
-	 * true of release_pages(): but those only clear PageMlocked after
-	 * put_page_testzero() has excluded any other users of the page.)
-	 */
-	if (folio_evictable(folio)) {
-		if (was_unevictable)
-			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
-	} else {
-		folio_clear_active(folio);
-		folio_set_unevictable(folio);
-		/*
-		 * folio->mlock_count = !!folio_test_mlocked(folio)?
-		 * But that leaves __mlock_page() in doubt whether another
-		 * actor has already counted the mlock or not. Err on the
-		 * safe side, underestimate, let page reclaim fix it, rather
-		 * than leaving a page on the unevictable LRU indefinitely.
-		 */
-		folio->mlock_count = 0;
-		if (!was_unevictable)
-			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
-	}
-
-	lruvec_add_folio(lruvec, folio);
-	trace_mm_lru_insertion(folio);
-}
-
-/*
- * Add the passed pages to the LRU, then drop the caller's refcount
- * on them. Reinitialises the caller's pagevec.
- */
-void __pagevec_lru_add(struct pagevec *pvec)
-{
-	int i;
-	struct lruvec *lruvec = NULL;
-	unsigned long flags = 0;
-
-	for (i = 0; i < pagevec_count(pvec); i++) {
-		struct folio *folio = page_folio(pvec->pages[i]);
-
-		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
-		__pagevec_lru_add_fn(folio, lruvec);
-	}
-	if (lruvec)
-		unlock_page_lruvec_irqrestore(lruvec, flags);
-	release_pages(pvec->pages, pvec->nr);
-	pagevec_reinit(pvec);
-}
-
/**
* folio_batch_remove_exceptionals() - Prune non-folios from a batch.
* @fbatch: The batch to prune