diff --git a/mm/migrate.c b/mm/migrate.c
index 2053b54..35a8833 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -157,8 +157,8 @@ void putback_movable_pages(struct list_head *l)
                list_del(&folio->lru);
                /*
                 * We isolated non-lru movable folio so here we can use
-                * __PageMovable because LRU folio's mapping cannot have
-                * PAGE_MAPPING_MOVABLE.
+                * __folio_test_movable because LRU folio's mapping cannot
+                * have PAGE_MAPPING_MOVABLE.
                 */
                if (unlikely(__folio_test_movable(folio))) {
                        VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
@@ -249,7 +249,7 @@ static bool remove_migration_pte(struct folio *folio,
 
                        pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
                        if (folio_test_anon(folio))
-                               hugepage_add_anon_rmap(new, vma, pvmw.address,
+                               hugepage_add_anon_rmap(folio, vma, pvmw.address,
                                                       rmap_flags);
                        else
                                page_dup_file_rmap(new, true);
@@ -524,7 +524,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
        int expected_count;
 
        xas_lock_irq(&xas);
-       expected_count = 2 + folio_has_private(src);
+       expected_count = folio_expected_refs(mapping, src);
        if (!folio_ref_freeze(src, expected_count)) {
                xas_unlock_irq(&xas);
                return -EAGAIN;
@@ -533,11 +533,11 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
        dst->index = src->index;
        dst->mapping = src->mapping;
 
-       folio_get(dst);
+       folio_ref_add(dst, folio_nr_pages(dst));
 
        xas_store(&xas, dst);
 
-       folio_ref_unfreeze(src, expected_count - 1);
+       folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
 
        xas_unlock_irq(&xas);
 
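The hunk above stops assuming a single page (the old "2 + folio_has_private()" target) and instead sizes both the freeze target and the unfreeze delta by folio_nr_pages(), so the page-cache references of a large hugetlb folio are handed over to dst as a block. folio_expected_refs() itself is defined elsewhere in migrate.c and is not part of this diff; the user-space sketch below only models the accounting it stands for, and every name in it (toy_folio, expected_refs) is hypothetical, not kernel code.

/*
 * User-space sketch only, not kernel code: models the reference
 * bookkeeping above for a large folio in the page cache.  One reference
 * is held by the migrating caller, one per base page by the page cache,
 * and one more if private data is attached.
 */
#include <assert.h>
#include <stdbool.h>

struct toy_folio {
	int refcount;		/* models folio_ref_count()    */
	int nr_pages;		/* models folio_nr_pages()     */
	bool has_private;	/* models folio_test_private() */
};

static int expected_refs(const struct toy_folio *f)
{
	return 1 + f->nr_pages + (f->has_private ? 1 : 0);
}

int main(void)
{
	struct toy_folio src = { .nr_pages = 512, .has_private = false };

	/* freeze only succeeds when exactly the expected refs are held */
	src.refcount = expected_refs(&src);
	assert(src.refcount == 513);

	/*
	 * dst took over the nr_pages page-cache references, so src is
	 * unfrozen at expected_refs - nr_pages: only the caller's ref
	 * remains.
	 */
	src.refcount = expected_refs(&src) - src.nr_pages;
	assert(src.refcount == 1);
	return 0;
}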
@@ -588,20 +588,20 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
         * Copy NUMA information to the new page, to prevent over-eager
         * future migrations of this same page.
         */
-       cpupid = page_cpupid_xchg_last(&folio->page, -1);
+       cpupid = folio_xchg_last_cpupid(folio, -1);
        /*
         * For memory tiering mode, when migrate between slow and fast
         * memory node, reset cpupid, because that is used to record
         * page access time in slow memory node.
         */
        if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
-               bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
-               bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
+               bool f_toptier = node_is_toptier(folio_nid(folio));
+               bool t_toptier = node_is_toptier(folio_nid(newfolio));
 
                if (f_toptier != t_toptier)
                        cpupid = -1;
        }
-       page_cpupid_xchg_last(&newfolio->page, cpupid);
+       folio_xchg_last_cpupid(newfolio, cpupid);
 
        folio_migrate_ksm(newfolio, folio);
        /*
@@ -633,8 +633,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
 
        folio_copy_owner(newfolio, folio);
 
-       if (!folio_test_hugetlb(folio))
-               mem_cgroup_migrate(folio, newfolio);
+       mem_cgroup_migrate(folio, newfolio);
 }
 EXPORT_SYMBOL(folio_migrate_flags);
 
@@ -946,7 +945,7 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
                                enum migrate_mode mode)
 {
        int rc = -EAGAIN;
-       bool is_lru = !__PageMovable(&src->page);
+       bool is_lru = !__folio_test_movable(src);
 
        VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
        VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
@@ -993,7 +992,7 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
         * src is freed; but stats require that PageAnon be left as PageAnon.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
-               if (__PageMovable(&src->page)) {
+               if (__folio_test_movable(src)) {
                        VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
 
                        /*
@@ -1028,22 +1027,28 @@ union migration_ptr {
        struct anon_vma *anon_vma;
        struct address_space *mapping;
 };
+
+enum {
+       PAGE_WAS_MAPPED = BIT(0),
+       PAGE_WAS_MLOCKED = BIT(1),
+};
+
 static void __migrate_folio_record(struct folio *dst,
-                                  unsigned long page_was_mapped,
+                                  unsigned long old_page_state,
                                   struct anon_vma *anon_vma)
 {
        union migration_ptr ptr = { .anon_vma = anon_vma };
        dst->mapping = ptr.mapping;
-       dst->private = (void *)page_was_mapped;
+       dst->private = (void *)old_page_state;
 }
 
 static void __migrate_folio_extract(struct folio *dst,
-                                  int *page_was_mappedp,
+                                  int *old_page_state,
                                   struct anon_vma **anon_vmap)
 {
        union migration_ptr ptr = { .mapping = dst->mapping };
        *anon_vmap = ptr.anon_vma;
-       *page_was_mappedp = (unsigned long)dst->private;
+       *old_page_state = (unsigned long)dst->private;
        dst->mapping = NULL;
        dst->private = NULL;
 }
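
The two hunks above widen the single page_was_mapped flag into a small bit set (PAGE_WAS_MAPPED, PAGE_WAS_MLOCKED) that is parked in dst->private while the destination folio sits between the unmap and move phases. Below is a minimal user-space model of that round trip through a void * field; toy_dst, record_state and extract_state are hypothetical names, and the snippet is an editorial sketch rather than kernel code.

/* User-space sketch only: two state bits survive a trip through a
 * void * field, as __migrate_folio_record()/__migrate_folio_extract()
 * do with dst->private above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1UL << (n))
#define PAGE_WAS_MAPPED		BIT(0)
#define PAGE_WAS_MLOCKED	BIT(1)

struct toy_dst {
	void *private;		/* stands in for dst->private */
};

static void record_state(struct toy_dst *dst, unsigned long state)
{
	dst->private = (void *)(uintptr_t)state;
}

static void extract_state(struct toy_dst *dst, int *state)
{
	*state = (int)(uintptr_t)dst->private;
	dst->private = NULL;
}

int main(void)
{
	struct toy_dst dst = { .private = NULL };
	int state;

	record_state(&dst, PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED);
	extract_state(&dst, &state);
	assert(state & PAGE_WAS_MAPPED);
	assert(state & PAGE_WAS_MLOCKED);
	printf("old_page_state=%#x\n", state);
	return 0;
}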
@@ -1085,7 +1090,7 @@ static void migrate_folio_done(struct folio *src,
        /*
         * Compaction can migrate also non-LRU pages which are
         * not accounted to NR_ISOLATED_*. They can be recognized
-        * as __PageMovable
+        * as __folio_test_movable
         */
        if (likely(!__folio_test_movable(src)))
                mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
@@ -1104,9 +1109,9 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 {
        struct folio *dst;
        int rc = -EAGAIN;
-       int page_was_mapped = 0;
+       int old_page_state = 0;
        struct anon_vma *anon_vma = NULL;
-       bool is_lru = !__PageMovable(&src->page);
+       bool is_lru = !__folio_test_movable(src);
        bool locked = false;
        bool dst_locked = false;
 
@@ -1158,6 +1163,8 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
                folio_lock(src);
        }
        locked = true;
+       if (folio_test_mlocked(src))
+               old_page_state |= PAGE_WAS_MLOCKED;
 
        if (folio_test_writeback(src)) {
                /*
@@ -1207,7 +1214,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
        dst_locked = true;
 
        if (unlikely(!is_lru)) {
-               __migrate_folio_record(dst, page_was_mapped, anon_vma);
+               __migrate_folio_record(dst, old_page_state, anon_vma);
                return MIGRATEPAGE_UNMAP;
        }
 
@@ -1233,11 +1240,11 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
                VM_BUG_ON_FOLIO(folio_test_anon(src) &&
                               !folio_test_ksm(src) && !anon_vma, src);
                try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
-               page_was_mapped = 1;
+               old_page_state |= PAGE_WAS_MAPPED;
        }
 
        if (!folio_mapped(src)) {
-               __migrate_folio_record(dst, page_was_mapped, anon_vma);
+               __migrate_folio_record(dst, old_page_state, anon_vma);
                return MIGRATEPAGE_UNMAP;
        }
 
@@ -1249,7 +1256,8 @@ out:
        if (rc == -EAGAIN)
                ret = NULL;
 
-       migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
+       migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
+                              anon_vma, locked, ret);
        migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
 
        return rc;
@@ -1262,12 +1270,12 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
                              struct list_head *ret)
 {
        int rc;
-       int page_was_mapped = 0;
+       int old_page_state = 0;
        struct anon_vma *anon_vma = NULL;
-       bool is_lru = !__PageMovable(&src->page);
+       bool is_lru = !__folio_test_movable(src);
        struct list_head *prev;
 
-       __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
+       __migrate_folio_extract(dst, &old_page_state, &anon_vma);
        prev = dst->lru.prev;
        list_del(&dst->lru);
 
@@ -1288,10 +1296,10 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
         * isolated from the unevictable LRU: but this case is the easiest.
         */
        folio_add_lru(dst);
-       if (page_was_mapped)
+       if (old_page_state & PAGE_WAS_MLOCKED)
                lru_add_drain();
 
-       if (page_was_mapped)
+       if (old_page_state & PAGE_WAS_MAPPED)
                remove_migration_ptes(src, dst, false);
 
 out_unlock_both:
@@ -1323,11 +1331,12 @@ out:
         */
        if (rc == -EAGAIN) {
                list_add(&dst->lru, prev);
-               __migrate_folio_record(dst, page_was_mapped, anon_vma);
+               __migrate_folio_record(dst, old_page_state, anon_vma);
                return rc;
        }
 
-       migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
+       migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
+                              anon_vma, true, ret);
        migrate_folio_undo_dst(dst, true, put_new_folio, private);
 
        return rc;
@@ -1496,6 +1505,7 @@ struct migrate_pages_stats {
        int nr_thp_succeeded;   /* THP migrated successfully */
        int nr_thp_failed;      /* THP failed to be migrated */
        int nr_thp_split;       /* THP split before migrating */
+       int nr_split;   /* Large folio (including THP) split before migrating */
 };
 
 /*
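
The new nr_split counter records every large-folio split, while nr_thp_split keeps counting only the PMD-mappable (THP) subset, which is why the later hunks bump both with "stats->nr_thp_split += is_thp" and "stats->nr_split++". A trivial user-space sketch of that relationship follows; toy_stats and account_split are hypothetical names, not kernel code.

/* User-space sketch only: nr_split counts every split, nr_thp_split
 * only the THP subset, mirroring the accounting in the hunks below. */
#include <stdbool.h>
#include <stdio.h>

struct toy_stats {
	int nr_thp_split;
	int nr_split;
};

static void account_split(struct toy_stats *stats, bool is_thp)
{
	stats->nr_thp_split += is_thp;
	stats->nr_split++;
}

int main(void)
{
	struct toy_stats stats = { 0 };

	account_split(&stats, true);	/* a THP was split		*/
	account_split(&stats, false);	/* a smaller large folio split	*/
	printf("thp_split=%d split=%d\n", stats.nr_thp_split, stats.nr_split);
	return 0;
}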
@@ -1615,6 +1625,7 @@ static int migrate_pages_batch(struct list_head *from,
        int nr_retry_pages = 0;
        int pass = 0;
        bool is_thp = false;
+       bool is_large = false;
        struct folio *folio, *folio2, *dst = NULL, *dst2;
        int rc, rc_saved = 0, nr_pages;
        LIST_HEAD(unmap_folios);
@@ -1630,7 +1641,8 @@ static int migrate_pages_batch(struct list_head *from,
                nr_retry_pages = 0;
 
                list_for_each_entry_safe(folio, folio2, from, lru) {
-                       is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
+                       is_large = folio_test_large(folio);
+                       is_thp = is_large && folio_test_pmd_mappable(folio);
                        nr_pages = folio_nr_pages(folio);
 
                        cond_resched();
@@ -1650,6 +1662,7 @@ static int migrate_pages_batch(struct list_head *from,
                                stats->nr_thp_failed++;
                                if (!try_split_folio(folio, split_folios)) {
                                        stats->nr_thp_split++;
+                                       stats->nr_split++;
                                        continue;
                                }
                                stats->nr_failed_pages += nr_pages;
@@ -1678,11 +1691,12 @@ static int migrate_pages_batch(struct list_head *from,
                                nr_failed++;
                                stats->nr_thp_failed += is_thp;
                                /* Large folio NUMA faulting doesn't split to retry. */
-                               if (folio_test_large(folio) && !nosplit) {
+                               if (is_large && !nosplit) {
                                        int ret = try_split_folio(folio, split_folios);
 
                                        if (!ret) {
                                                stats->nr_thp_split += is_thp;
+                                               stats->nr_split++;
                                                break;
                                        } else if (reason == MR_LONGTERM_PIN &&
                                                   ret == -EAGAIN) {
@@ -1795,12 +1809,12 @@ out:
        dst = list_first_entry(&dst_folios, struct folio, lru);
        dst2 = list_next_entry(dst, lru);
        list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
-               int page_was_mapped = 0;
+               int old_page_state = 0;
                struct anon_vma *anon_vma = NULL;
 
-               __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
-               migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
-                                      true, ret_folios);
+               __migrate_folio_extract(dst, &old_page_state, &anon_vma);
+               migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
+                                      anon_vma, true, ret_folios);
                list_del(&dst->lru);
                migrate_folio_undo_dst(dst, true, put_new_folio, private);
                dst = dst2;
@@ -1828,6 +1842,7 @@ static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
        stats->nr_succeeded += astats.nr_succeeded;
        stats->nr_thp_succeeded += astats.nr_thp_succeeded;
        stats->nr_thp_split += astats.nr_thp_split;
+       stats->nr_split += astats.nr_split;
        if (rc < 0) {
                stats->nr_failed_pages += astats.nr_failed_pages;
                stats->nr_thp_failed += astats.nr_thp_failed;
@@ -1835,7 +1850,11 @@ static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
                return rc;
        }
        stats->nr_thp_failed += astats.nr_thp_split;
-       nr_failed += astats.nr_thp_split;
+       /*
+        * Do not count rc, as pages will be retried below.
+        * Count nr_split only, since it includes nr_thp_split.
+        */
+       nr_failed += astats.nr_split;
        /*
         * Fall back to migrate all failed folios one by one synchronously. All
         * failed folios except split THPs will be retried, so their failure
@@ -1970,7 +1989,8 @@ out:
        count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
        trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
                               stats.nr_thp_succeeded, stats.nr_thp_failed,
-                              stats.nr_thp_split, mode, reason);
+                              stats.nr_thp_split, stats.nr_split, mode,
+                              reason);
 
        if (ret_succeeded)
                *ret_succeeded = stats.nr_succeeded;
@@ -2029,8 +2049,7 @@ static int store_status(int __user *status, int start, int value, int nr)
        return 0;
 }
 
-static int do_move_pages_to_node(struct mm_struct *mm,
-               struct list_head *pagelist, int node)
+static int do_move_pages_to_node(struct list_head *pagelist, int node)
 {
        int err;
        struct migration_target_control mtc = {
@@ -2060,8 +2079,8 @@ static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
        struct vm_area_struct *vma;
        unsigned long addr;
        struct page *page;
+       struct folio *folio;
        int err;
-       bool isolated;
 
        mmap_read_lock(mm);
        addr = (unsigned long)untagged_addr_remote(mm, p);
@@ -2082,51 +2101,44 @@ static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
        if (!page)
                goto out;
 
-       if (is_zone_device_page(page))
-               goto out_putpage;
+       folio = page_folio(page);
+       if (folio_is_zone_device(folio))
+               goto out_putfolio;
 
        err = 0;
-       if (page_to_nid(page) == node)
-               goto out_putpage;
+       if (folio_nid(folio) == node)
+               goto out_putfolio;
 
        err = -EACCES;
        if (page_mapcount(page) > 1 && !migrate_all)
-               goto out_putpage;
+               goto out_putfolio;
 
-       if (PageHuge(page)) {
-               if (PageHead(page)) {
-                       isolated = isolate_hugetlb(page_folio(page), pagelist);
-                       err = isolated ? 1 : -EBUSY;
-               }
+       err = -EBUSY;
+       if (folio_test_hugetlb(folio)) {
+               if (isolate_hugetlb(folio, pagelist))
+                       err = 1;
        } else {
-               struct page *head;
-
-               head = compound_head(page);
-               isolated = isolate_lru_page(head);
-               if (!isolated) {
-                       err = -EBUSY;
-                       goto out_putpage;
-               }
+               if (!folio_isolate_lru(folio))
+                       goto out_putfolio;
 
                err = 1;
-               list_add_tail(&head->lru, pagelist);
-               mod_node_page_state(page_pgdat(head),
-                       NR_ISOLATED_ANON + page_is_file_lru(head),
-                       thp_nr_pages(head));
+               list_add_tail(&folio->lru, pagelist);
+               node_stat_mod_folio(folio,
+                       NR_ISOLATED_ANON + folio_is_file_lru(folio),
+                       folio_nr_pages(folio));
        }
-out_putpage:
+out_putfolio:
        /*
-        * Either remove the duplicate refcount from
-        * isolate_lru_page() or drop the page ref if it was
-        * not isolated.
+        * Either remove the duplicate refcount from folio_isolate_lru()
+        * or drop the folio ref if it was not isolated.
         */
-       put_page(page);
+       folio_put(folio);
 out:
        mmap_read_unlock(mm);
        return err;
 }
 
-static int move_pages_and_store_status(struct mm_struct *mm, int node,
+static int move_pages_and_store_status(int node,
                struct list_head *pagelist, int __user *status,
                int start, int i, unsigned long nr_pages)
 {
@@ -2135,7 +2147,7 @@ static int move_pages_and_store_status(struct mm_struct *mm, int node,
        if (list_empty(pagelist))
                return 0;
 
-       err = do_move_pages_to_node(mm, pagelist, node);
+       err = do_move_pages_to_node(pagelist, node);
        if (err) {
                /*
                 * Positive err means the number of failed
@@ -2162,6 +2174,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                         const int __user *nodes,
                         int __user *status, int flags)
 {
+       compat_uptr_t __user *compat_pages = (void __user *)pages;
        int current_node = NUMA_NO_NODE;
        LIST_HEAD(pagelist);
        int start, i;
@@ -2174,8 +2187,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                int node;
 
                err = -EFAULT;
-               if (get_user(p, pages + i))
-                       goto out_flush;
+               if (in_compat_syscall()) {
+                       compat_uptr_t cp;
+
+                       if (get_user(cp, compat_pages + i))
+                               goto out_flush;
+
+                       p = compat_ptr(cp);
+               } else {
+                       if (get_user(p, pages + i))
+                               goto out_flush;
+               }
                if (get_user(node, nodes + i))
                        goto out_flush;
 
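The in_compat_syscall() branch above is needed because a 32-bit caller hands the kernel an array of 32-bit user pointers, so walking pages + i with the native pointer stride would read from the wrong offsets. The user-space sketch below models only the stride difference; read_page_ptr is a hypothetical name and the snippet is not kernel code.

/* User-space sketch only: the same index selects different array
 * elements depending on whether the caller stored 4-byte or
 * native-width pointers. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr_t;

static const void *read_page_ptr(const void *pages, unsigned long i,
				 int compat)
{
	if (compat) {
		/* 32-bit caller: pages[] is an array of 4-byte pointers */
		const compat_uptr_t *cp = pages;
		return (const void *)(uintptr_t)cp[i];
	}
	/* native caller: pages[] is an array of full-width pointers */
	const void *const *p = pages;
	return p[i];
}

int main(void)
{
	compat_uptr_t compat_pages[2] = { 0x1000, 0x2000 };
	const void *native_pages[2]   = { (void *)0x1000, (void *)0x2000 };

	printf("%p %p\n",
	       (void *)read_page_ptr(compat_pages, 1, 1),
	       (void *)read_page_ptr(native_pages, 1, 0));
	return 0;
}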
@@ -2193,7 +2215,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                        current_node = node;
                        start = i;
                } else if (node != current_node) {
-                       err = move_pages_and_store_status(mm, current_node,
+                       err = move_pages_and_store_status(current_node,
                                        &pagelist, status, start, i, nr_pages);
                        if (err)
                                goto out;
@@ -2228,7 +2250,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
                if (err)
                        goto out_flush;
 
-               err = move_pages_and_store_status(mm, current_node, &pagelist,
+               err = move_pages_and_store_status(current_node, &pagelist,
                                status, start, i, nr_pages);
                if (err) {
                        /* We have accounted for page i */
@@ -2240,7 +2262,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
        }
 out_flush:
        /* Make sure we do not overwrite the existing error */
-       err1 = move_pages_and_store_status(mm, current_node, &pagelist,
+       err1 = move_pages_and_store_status(current_node, &pagelist,
                                status, start, i, nr_pages);
        if (err >= 0)
                err = err1;
@@ -2481,16 +2503,9 @@ static struct folio *alloc_misplaced_dst_folio(struct folio *src,
        return __folio_alloc_node(gfp, order, nid);
 }
 
-static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
+static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
 {
-       int nr_pages = thp_nr_pages(page);
-       int order = compound_order(page);
-
-       VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
-
-       /* Do not migrate THP mapped by multiple processes */
-       if (PageTransHuge(page) && total_mapcount(page) > 1)
-               return 0;
+       int nr_pages = folio_nr_pages(folio);
 
        /* Avoid migrating to a node that is nearly full */
        if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
@@ -2502,75 +2517,79 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
                        if (managed_zone(pgdat->node_zones + z))
                                break;
                }
-               wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
+               wakeup_kswapd(pgdat->node_zones + z, 0,
+                             folio_order(folio), ZONE_MOVABLE);
                return 0;
        }
 
-       if (!isolate_lru_page(page))
+       if (!folio_isolate_lru(folio))
                return 0;
 
-       mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
+       node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
                            nr_pages);
 
        /*
-        * Isolating the page has taken another reference, so the
-        * caller's reference can be safely dropped without the page
+        * Isolating the folio has taken another reference, so the
+        * caller's reference can be safely dropped without the folio
         * disappearing underneath us during migration.
         */
-       put_page(page);
+       folio_put(folio);
        return 1;
 }
 
 /*
- * Attempt to migrate a misplaced page to the specified destination
+ * Attempt to migrate a misplaced folio to the specified destination
  * node. Caller is expected to have an elevated reference count on
- * the page that will be dropped by this function before returning.
+ * the folio that will be dropped by this function before returning.
  */
-int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
-                          int node)
+int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
+                           int node)
 {
        pg_data_t *pgdat = NODE_DATA(node);
        int isolated;
        int nr_remaining;
        unsigned int nr_succeeded;
        LIST_HEAD(migratepages);
-       int nr_pages = thp_nr_pages(page);
+       int nr_pages = folio_nr_pages(folio);
 
        /*
-        * Don't migrate file pages that are mapped in multiple processes
+        * Don't migrate file folios that are mapped in multiple processes
         * with execute permissions as they are probably shared libraries.
+        * To check if the folio is shared, ideally we want to make sure
+        * every page is mapped to the same process. Doing that is very
+        * expensive, so check the estimated mapcount of the folio instead.
         */
-       if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
+       if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) &&
            (vma->vm_flags & VM_EXEC))
                goto out;
 
        /*
-        * Also do not migrate dirty pages as not all filesystems can move
-        * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
+        * Also do not migrate dirty folios as not all filesystems can move
+        * dirty folios in MIGRATE_ASYNC mode which is a waste of cycles.
         */
-       if (page_is_file_lru(page) && PageDirty(page))
+       if (folio_is_file_lru(folio) && folio_test_dirty(folio))
                goto out;
 
-       isolated = numamigrate_isolate_page(pgdat, page);
+       isolated = numamigrate_isolate_folio(pgdat, folio);
        if (!isolated)
                goto out;
 
-       list_add(&page->lru, &migratepages);
+       list_add(&folio->lru, &migratepages);
        nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
                                     NULL, node, MIGRATE_ASYNC,
                                     MR_NUMA_MISPLACED, &nr_succeeded);
        if (nr_remaining) {
                if (!list_empty(&migratepages)) {
-                       list_del(&page->lru);
-                       mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                       page_is_file_lru(page), -nr_pages);
-                       putback_lru_page(page);
+                       list_del(&folio->lru);
+                       node_stat_mod_folio(folio, NR_ISOLATED_ANON +
+                                       folio_is_file_lru(folio), -nr_pages);
+                       folio_putback_lru(folio);
                }
                isolated = 0;
        }
        if (nr_succeeded) {
                count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
-               if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
+               if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
                        mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
                                            nr_succeeded);
        }
@@ -2578,7 +2597,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
        return isolated;
 
 out:
-       put_page(page);
+       folio_put(folio);
        return 0;
 }
 #endif /* CONFIG_NUMA_BALANCING */