1 // SPDX-License-Identifier: GPL-2.0
3 * Memory Migration functionality - linux/mm/migrate.c
5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
7 * Page migration was first developed in the context of the memory hotplug
8 * project. The main authors of the migration code are:
10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11 * Hirokazu Takahashi <taka@valinux.co.jp>
12 * Dave Hansen <haveblue@us.ibm.com>
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/nsproxy.h>
24 #include <linux/pagevec.h>
25 #include <linux/ksm.h>
26 #include <linux/rmap.h>
27 #include <linux/topology.h>
28 #include <linux/cpu.h>
29 #include <linux/cpuset.h>
30 #include <linux/writeback.h>
31 #include <linux/mempolicy.h>
32 #include <linux/vmalloc.h>
33 #include <linux/security.h>
34 #include <linux/backing-dev.h>
35 #include <linux/compaction.h>
36 #include <linux/syscalls.h>
37 #include <linux/compat.h>
38 #include <linux/hugetlb.h>
39 #include <linux/hugetlb_cgroup.h>
40 #include <linux/gfp.h>
41 #include <linux/pfn_t.h>
42 #include <linux/memremap.h>
43 #include <linux/userfaultfd_k.h>
44 #include <linux/balloon_compaction.h>
45 #include <linux/page_idle.h>
46 #include <linux/page_owner.h>
47 #include <linux/sched/mm.h>
48 #include <linux/ptrace.h>
49 #include <linux/oom.h>
50 #include <linux/memory.h>
51 #include <linux/random.h>
52 #include <linux/sched/sysctl.h>
53 #include <linux/memory-tiers.h>
55 #include <asm/tlbflush.h>
57 #include <trace/events/migrate.h>
61 int isolate_movable_page(struct page *page, isolate_mode_t mode)
63 const struct movable_operations *mops;
66 * Avoid burning cycles with pages that are still under __free_pages(),
67 * or just got freed under us.
69 * In case we 'win' a race for a movable page being freed under us and
70 * raise its refcount, preventing __free_pages() from doing its job,
71 * the put_page() at the end of this block will take care of
72 * releasing this page, thus avoiding a nasty leakage.
74 if (unlikely(!get_page_unless_zero(page)))
77 if (unlikely(PageSlab(page)))
79 /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
82 * Check the movable flag before taking the page lock because
83 * we use non-atomic bitops on newly allocated page flags, so
84 * unconditionally grabbing the lock would ruin the page owner's use of them.
86 if (unlikely(!__PageMovable(page)))
88 /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
90 if (unlikely(PageSlab(page)))
94 * As movable pages are not isolated from LRU lists, concurrent
95 * compaction threads can race against page migration functions
96 * as well as race against a page being released.
98 * In order to avoid having an already isolated movable page
99 * being (wrongly) re-isolated while it is under migration,
100 * or to avoid attempting to isolate pages being released,
101 * let's be sure we have the page lock
102 * before proceeding with the movable page isolation steps.
104 if (unlikely(!trylock_page(page)))
107 if (!PageMovable(page) || PageIsolated(page))
108 goto out_no_isolated;
110 mops = page_movable_ops(page);
111 VM_BUG_ON_PAGE(!mops, page);
113 if (!mops->isolate_page(page, mode))
114 goto out_no_isolated;
116 /* Driver shouldn't use PG_isolated bit of page->flags */
117 WARN_ON_ONCE(PageIsolated(page));
118 SetPageIsolated(page);
131 static void putback_movable_page(struct page *page)
133 const struct movable_operations *mops = page_movable_ops(page);
135 mops->putback_page(page);
136 ClearPageIsolated(page);
140 * Put previously isolated pages back onto the appropriate lists
141 * from where they were once taken off for compaction/migration.
143 * This function shall be used whenever the isolated pageset has been
144 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
145 * and isolate_hugetlb().
147 void putback_movable_pages(struct list_head *l)
152 list_for_each_entry_safe(page, page2, l, lru) {
153 if (unlikely(PageHuge(page))) {
154 putback_active_hugepage(page);
157 list_del(&page->lru);
159 * We isolated a non-LRU movable page, so here we can use
160 * __PageMovable because an LRU page's mapping cannot have
161 * PAGE_MAPPING_MOVABLE set.
163 if (unlikely(__PageMovable(page))) {
164 VM_BUG_ON_PAGE(!PageIsolated(page), page);
166 if (PageMovable(page))
167 putback_movable_page(page);
169 ClearPageIsolated(page);
173 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
174 page_is_file_lru(page), -thp_nr_pages(page));
175 putback_lru_page(page);
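/*
 * Illustrative sketch (not part of this file): how a compaction-style
 * caller is expected to pair isolate_movable_page() with
 * putback_movable_pages().  The list handling below is simplified, and
 * the "0 means isolated" return convention is an assumption about this
 * kernel version; real callers such as isolate_migratepages_block()
 * carry considerably more state.
 *
 *	LIST_HEAD(migratepages);
 *	isolate_mode_t mode = 0;	// caller's isolation mode
 *
 *	if (!isolate_movable_page(page, mode))	// assumed: 0 == isolated
 *		list_add(&page->lru, &migratepages);
 *	...
 *	// on abort or failure, give every isolated page back
 *	putback_movable_pages(&migratepages);
 */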
181 * Restore a potential migration pte to a working pte entry
183 static bool remove_migration_pte(struct folio *folio,
184 struct vm_area_struct *vma, unsigned long addr, void *old)
186 DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
188 while (page_vma_mapped_walk(&pvmw)) {
189 rmap_t rmap_flags = RMAP_NONE;
193 unsigned long idx = 0;
195 /* pgoff is invalid for ksm pages, but they are never large */
196 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
197 idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
198 new = folio_page(folio, idx);
200 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
201 /* PMD-mapped THP migration entry */
203 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
204 !folio_test_pmd_mappable(folio), folio);
205 remove_migration_pmd(&pvmw, new);
211 pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
212 if (pte_swp_soft_dirty(*pvmw.pte))
213 pte = pte_mksoft_dirty(pte);
216 * Recheck VMA as permissions can change since migration started
218 entry = pte_to_swp_entry(*pvmw.pte);
219 if (!is_migration_entry_young(entry))
220 pte = pte_mkold(pte);
221 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
222 pte = pte_mkdirty(pte);
223 if (is_writable_migration_entry(entry))
224 pte = maybe_mkwrite(pte, vma);
225 else if (pte_swp_uffd_wp(*pvmw.pte))
226 pte = pte_mkuffd_wp(pte);
228 pte = pte_wrprotect(pte);
230 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
231 rmap_flags |= RMAP_EXCLUSIVE;
233 if (unlikely(is_device_private_page(new))) {
235 entry = make_writable_device_private_entry(
238 entry = make_readable_device_private_entry(
240 pte = swp_entry_to_pte(entry);
241 if (pte_swp_soft_dirty(*pvmw.pte))
242 pte = pte_swp_mksoft_dirty(pte);
243 if (pte_swp_uffd_wp(*pvmw.pte))
244 pte = pte_swp_mkuffd_wp(pte);
247 #ifdef CONFIG_HUGETLB_PAGE
248 if (folio_test_hugetlb(folio)) {
249 unsigned int shift = huge_page_shift(hstate_vma(vma));
251 pte = pte_mkhuge(pte);
252 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
253 if (folio_test_anon(folio))
254 hugepage_add_anon_rmap(new, vma, pvmw.address,
257 page_dup_file_rmap(new, true);
258 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
262 if (folio_test_anon(folio))
263 page_add_anon_rmap(new, vma, pvmw.address,
266 page_add_file_rmap(new, vma, false);
267 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
269 if (vma->vm_flags & VM_LOCKED)
270 mlock_page_drain_local();
272 trace_remove_migration_pte(pvmw.address, pte_val(pte),
273 compound_order(new));
275 /* No need to invalidate - it was non-present before */
276 update_mmu_cache(vma, pvmw.address, pvmw.pte);
283 * Get rid of all migration entries and replace them by
284 * references to the indicated page.
286 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
288 struct rmap_walk_control rwc = {
289 .rmap_one = remove_migration_pte,
294 rmap_walk_locked(dst, &rwc);
296 rmap_walk(dst, &rwc);
300 * Something used the pte of a page under migration. We need to
301 * get to the page and wait until migration is finished.
302 * When we return from this function the fault will be retried.
304 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
312 if (!is_swap_pte(pte))
315 entry = pte_to_swp_entry(pte);
316 if (!is_migration_entry(entry))
319 migration_entry_wait_on_locked(entry, ptep, ptl);
322 pte_unmap_unlock(ptep, ptl);
325 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
326 unsigned long address)
328 spinlock_t *ptl = pte_lockptr(mm, pmd);
329 pte_t *ptep = pte_offset_map(pmd, address);
330 __migration_entry_wait(mm, ptep, ptl);
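/*
 * Illustrative sketch (not part of this file): the page-fault path is the
 * typical caller of migration_entry_wait().  Roughly, do_swap_page() does
 * the equivalent of the following before treating the entry as a real
 * swap entry; details are simplified here.
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry))
 *			migration_entry_wait(vma->vm_mm, vmf->pmd,
 *					     vmf->address);
 *		...
 *	}
 */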
333 #ifdef CONFIG_HUGETLB_PAGE
334 void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl)
339 pte = huge_ptep_get(ptep);
341 if (unlikely(!is_hugetlb_entry_migration(pte)))
344 migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl);
347 void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
349 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);
351 __migration_entry_wait_huge(pte, ptl);
355 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
356 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
360 ptl = pmd_lock(mm, pmd);
361 if (!is_pmd_migration_entry(*pmd))
363 migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
370 static int folio_expected_refs(struct address_space *mapping,
377 refs += folio_nr_pages(folio);
378 if (folio_test_private(folio))
385 * Replace the page in the mapping.
387 * The number of remaining references must be:
388 * 1 for anonymous pages without a mapping
389 * 2 for pages with a mapping
390 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
392 int folio_migrate_mapping(struct address_space *mapping,
393 struct folio *newfolio, struct folio *folio, int extra_count)
395 XA_STATE(xas, &mapping->i_pages, folio_index(folio));
396 struct zone *oldzone, *newzone;
398 int expected_count = folio_expected_refs(mapping, folio) + extra_count;
399 long nr = folio_nr_pages(folio);
402 /* Anonymous page without mapping */
403 if (folio_ref_count(folio) != expected_count)
406 /* No turning back from here */
407 newfolio->index = folio->index;
408 newfolio->mapping = folio->mapping;
409 if (folio_test_swapbacked(folio))
410 __folio_set_swapbacked(newfolio);
412 return MIGRATEPAGE_SUCCESS;
415 oldzone = folio_zone(folio);
416 newzone = folio_zone(newfolio);
419 if (!folio_ref_freeze(folio, expected_count)) {
420 xas_unlock_irq(&xas);
425 * Now we know that no one else is looking at the folio:
426 * no turning back from here.
428 newfolio->index = folio->index;
429 newfolio->mapping = folio->mapping;
430 folio_ref_add(newfolio, nr); /* add cache reference */
431 if (folio_test_swapbacked(folio)) {
432 __folio_set_swapbacked(newfolio);
433 if (folio_test_swapcache(folio)) {
434 folio_set_swapcache(newfolio);
435 newfolio->private = folio_get_private(folio);
438 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
441 /* Move dirty while page refs frozen and newpage not yet exposed */
442 dirty = folio_test_dirty(folio);
444 folio_clear_dirty(folio);
445 folio_set_dirty(newfolio);
448 xas_store(&xas, newfolio);
451 * Drop cache reference from old page by unfreezing
452 * to one less reference.
453 * We know this isn't the last reference.
455 folio_ref_unfreeze(folio, expected_count - nr);
458 /* Leave irq disabled to prevent preemption while updating stats */
461 * If moved to a different zone then also account
462 * the page for that zone. Other VM counters will be
463 * taken care of when we establish references to the
464 * new page and drop references to the old page.
466 * Note that anonymous pages are accounted for
467 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
468 * are mapped to swap space.
470 if (newzone != oldzone) {
471 struct lruvec *old_lruvec, *new_lruvec;
472 struct mem_cgroup *memcg;
474 memcg = folio_memcg(folio);
475 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
476 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
478 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
479 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
480 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
481 __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
482 __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
485 if (folio_test_swapcache(folio)) {
486 __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
487 __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
490 if (dirty && mapping_can_writeback(mapping)) {
491 __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
492 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
493 __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
494 __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
499 return MIGRATEPAGE_SUCCESS;
501 EXPORT_SYMBOL(folio_migrate_mapping);
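/*
 * Worked example (illustration only) of the reference rule documented
 * above folio_migrate_mapping(): for an order-0 pagecache folio that also
 * has buffer heads attached, the expected count is
 *
 *	1 (held for the migration attempt) + 1 (page cache) + 1 (private)
 *	  = 3 references,
 *
 * while an anonymous folio outside the swap cache has no mapping and only
 * the single reference held for the migration attempt.  Any extra
 * reference (a concurrent pin, for instance) makes the count check or
 * folio_ref_freeze() fail and the attempt returns -EAGAIN to be retried.
 */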
504 * The expected number of remaining references is the same as that
505 * of folio_migrate_mapping().
507 int migrate_huge_page_move_mapping(struct address_space *mapping,
508 struct folio *dst, struct folio *src)
510 XA_STATE(xas, &mapping->i_pages, folio_index(src));
514 expected_count = 2 + folio_has_private(src);
515 if (!folio_ref_freeze(src, expected_count)) {
516 xas_unlock_irq(&xas);
520 dst->index = src->index;
521 dst->mapping = src->mapping;
525 xas_store(&xas, dst);
527 folio_ref_unfreeze(src, expected_count - 1);
529 xas_unlock_irq(&xas);
531 return MIGRATEPAGE_SUCCESS;
535 * Copy the flags and some other ancillary information
537 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
541 if (folio_test_error(folio))
542 folio_set_error(newfolio);
543 if (folio_test_referenced(folio))
544 folio_set_referenced(newfolio);
545 if (folio_test_uptodate(folio))
546 folio_mark_uptodate(newfolio);
547 if (folio_test_clear_active(folio)) {
548 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
549 folio_set_active(newfolio);
550 } else if (folio_test_clear_unevictable(folio))
551 folio_set_unevictable(newfolio);
552 if (folio_test_workingset(folio))
553 folio_set_workingset(newfolio);
554 if (folio_test_checked(folio))
555 folio_set_checked(newfolio);
557 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
558 * migration entries. We can still have PG_anon_exclusive set on the
559 * effectively unmapped and unreferenced first sub-page of an
560 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
562 if (folio_test_mappedtodisk(folio))
563 folio_set_mappedtodisk(newfolio);
565 /* Move dirty on pages not done by folio_migrate_mapping() */
566 if (folio_test_dirty(folio))
567 folio_set_dirty(newfolio);
569 if (folio_test_young(folio))
570 folio_set_young(newfolio);
571 if (folio_test_idle(folio))
572 folio_set_idle(newfolio);
575 * Copy NUMA information to the new page, to prevent over-eager
576 * future migrations of this same page.
578 cpupid = page_cpupid_xchg_last(&folio->page, -1);
580 * In memory tiering mode, when migrating between slow and fast
581 * memory nodes, reset cpupid, because it is used to record the
582 * page access time in the slow memory node.
584 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
585 bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
586 bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
588 if (f_toptier != t_toptier)
591 page_cpupid_xchg_last(&newfolio->page, cpupid);
593 folio_migrate_ksm(newfolio, folio);
595 * Please do not reorder this without considering how mm/ksm.c's
596 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
598 if (folio_test_swapcache(folio))
599 folio_clear_swapcache(folio);
600 folio_clear_private(folio);
602 /* page->private contains hugetlb specific flags */
603 if (!folio_test_hugetlb(folio))
604 folio->private = NULL;
607 * If any waiters have accumulated on the new page then wake them up.
610 if (folio_test_writeback(newfolio))
611 folio_end_writeback(newfolio);
614 * PG_readahead shares the same bit with PG_reclaim. The above
615 * end_page_writeback() may clear PG_readahead mistakenly, so set the bit again here.
618 if (folio_test_readahead(folio))
619 folio_set_readahead(newfolio);
621 folio_copy_owner(newfolio, folio);
623 if (!folio_test_hugetlb(folio))
624 mem_cgroup_migrate(folio, newfolio);
626 EXPORT_SYMBOL(folio_migrate_flags);
628 void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
630 folio_copy(newfolio, folio);
631 folio_migrate_flags(newfolio, folio);
633 EXPORT_SYMBOL(folio_migrate_copy);
635 /************************************************************
636 * Migration functions
637 ***********************************************************/
639 int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
640 struct folio *src, enum migrate_mode mode, int extra_count)
644 BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
646 rc = folio_migrate_mapping(mapping, dst, src, extra_count);
648 if (rc != MIGRATEPAGE_SUCCESS)
651 if (mode != MIGRATE_SYNC_NO_COPY)
652 folio_migrate_copy(dst, src);
654 folio_migrate_flags(dst, src);
655 return MIGRATEPAGE_SUCCESS;
659 * migrate_folio() - Simple folio migration.
660 * @mapping: The address_space containing the folio.
661 * @dst: The folio to migrate the data to.
662 * @src: The folio containing the current data.
663 * @mode: How to migrate the page.
665 * Common logic to directly migrate a single LRU folio suitable for
666 * folios that do not use PagePrivate/PagePrivate2.
668 * Folios are locked upon entry and exit.
670 int migrate_folio(struct address_space *mapping, struct folio *dst,
671 struct folio *src, enum migrate_mode mode)
673 return migrate_folio_extra(mapping, dst, src, mode, 0);
675 EXPORT_SYMBOL(migrate_folio);
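/*
 * Illustrative sketch (not part of this file): a filesystem whose folios
 * carry no private data can typically enable migration by pointing its
 * address_space_operations at migrate_folio() directly.  "example_aops"
 * is a hypothetical name used only for illustration.
 *
 *	static const struct address_space_operations example_aops = {
 *		...
 *		.migrate_folio	= migrate_folio,
 *	};
 *
 * Filesystems that do keep data in folio->private usually want
 * filemap_migrate_folio() or one of the buffer_migrate_folio() variants
 * defined below instead.
 */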
678 /* Returns true if all buffers are successfully locked */
679 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
680 enum migrate_mode mode)
682 struct buffer_head *bh = head;
684 /* Simple case, sync compaction */
685 if (mode != MIGRATE_ASYNC) {
688 bh = bh->b_this_page;
690 } while (bh != head);
695 /* async case, we cannot block on lock_buffer so use trylock_buffer */
697 if (!trylock_buffer(bh)) {
699 * We failed to lock the buffer and cannot stall in
700 * async migration. Release the locks taken so far.
702 struct buffer_head *failed_bh = bh;
704 while (bh != failed_bh) {
706 bh = bh->b_this_page;
711 bh = bh->b_this_page;
712 } while (bh != head);
716 static int __buffer_migrate_folio(struct address_space *mapping,
717 struct folio *dst, struct folio *src, enum migrate_mode mode,
720 struct buffer_head *bh, *head;
724 head = folio_buffers(src);
726 return migrate_folio(mapping, dst, src, mode);
728 /* Check that the folio has no extra refs before we do more work */
729 expected_count = folio_expected_refs(mapping, src);
730 if (folio_ref_count(src) != expected_count)
733 if (!buffer_migrate_lock_buffers(head, mode))
738 bool invalidated = false;
742 spin_lock(&mapping->private_lock);
745 if (atomic_read(&bh->b_count)) {
749 bh = bh->b_this_page;
750 } while (bh != head);
756 spin_unlock(&mapping->private_lock);
757 invalidate_bh_lrus();
759 goto recheck_buffers;
763 rc = folio_migrate_mapping(mapping, dst, src, 0);
764 if (rc != MIGRATEPAGE_SUCCESS)
767 folio_attach_private(dst, folio_detach_private(src));
771 set_bh_page(bh, &dst->page, bh_offset(bh));
772 bh = bh->b_this_page;
773 } while (bh != head);
775 if (mode != MIGRATE_SYNC_NO_COPY)
776 folio_migrate_copy(dst, src);
778 folio_migrate_flags(dst, src);
780 rc = MIGRATEPAGE_SUCCESS;
783 spin_unlock(&mapping->private_lock);
787 bh = bh->b_this_page;
788 } while (bh != head);
794 * buffer_migrate_folio() - Migration function for folios with buffers.
795 * @mapping: The address space containing @src.
796 * @dst: The folio to migrate to.
797 * @src: The folio to migrate from.
798 * @mode: How to migrate the folio.
800 * This function can only be used if the underlying filesystem guarantees
801 * that no other references to @src exist. For example attached buffer
802 * heads are accessed only under the folio lock. If your filesystem cannot
803 * provide this guarantee, buffer_migrate_folio_norefs() may be more appropriate.
806 * Return: 0 on success or a negative errno on failure.
808 int buffer_migrate_folio(struct address_space *mapping,
809 struct folio *dst, struct folio *src, enum migrate_mode mode)
811 return __buffer_migrate_folio(mapping, dst, src, mode, false);
813 EXPORT_SYMBOL(buffer_migrate_folio);
816 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
817 * @mapping: The address space containing @src.
818 * @dst: The folio to migrate to.
819 * @src: The folio to migrate from.
820 * @mode: How to migrate the folio.
822 * Like buffer_migrate_folio() except that this variant is more careful
823 * and checks that there are also no buffer head references. This function
824 * is the right one for mappings where buffer heads are directly looked
825 * up and referenced (such as block device mappings).
827 * Return: 0 on success or a negative errno on failure.
829 int buffer_migrate_folio_norefs(struct address_space *mapping,
830 struct folio *dst, struct folio *src, enum migrate_mode mode)
832 return __buffer_migrate_folio(mapping, dst, src, mode, true);
834 EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
837 int filemap_migrate_folio(struct address_space *mapping,
838 struct folio *dst, struct folio *src, enum migrate_mode mode)
842 ret = folio_migrate_mapping(mapping, dst, src, 0);
843 if (ret != MIGRATEPAGE_SUCCESS)
846 if (folio_get_private(src))
847 folio_attach_private(dst, folio_detach_private(src));
849 if (mode != MIGRATE_SYNC_NO_COPY)
850 folio_migrate_copy(dst, src);
852 folio_migrate_flags(dst, src);
853 return MIGRATEPAGE_SUCCESS;
855 EXPORT_SYMBOL_GPL(filemap_migrate_folio);
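/*
 * Illustrative note (not from this file): choosing between the exported
 * helpers above is a per-filesystem decision made in the
 * address_space_operations.  A rough guide, stated as an assumption
 * rather than a rule:
 *
 *	.migrate_folio = migrate_folio			// no ->private data
 *	.migrate_folio = filemap_migrate_folio		// opaque ->private data
 *	.migrate_folio = buffer_migrate_folio		// buffer heads protected
 *							// by the folio lock
 *	.migrate_folio = buffer_migrate_folio_norefs	// buffer heads that may
 *							// be referenced directly,
 *							// e.g. block device mappings
 */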
858 * Write back a folio to clean its dirty state
860 static int writeout(struct address_space *mapping, struct folio *folio)
862 struct writeback_control wbc = {
863 .sync_mode = WB_SYNC_NONE,
866 .range_end = LLONG_MAX,
871 if (!mapping->a_ops->writepage)
872 /* No write method for the address space */
875 if (!folio_clear_dirty_for_io(folio))
876 /* Someone else already triggered a write */
880 * A dirty folio may imply that the underlying filesystem has
881 * the folio on some queue. So the folio must be clean for
882 * migration. Writeout may mean we lose the lock and the
883 * folio state is no longer what we checked for earlier.
884 * At this point we know that the migration attempt cannot be successful.
887 remove_migration_ptes(folio, folio, false);
889 rc = mapping->a_ops->writepage(&folio->page, &wbc);
891 if (rc != AOP_WRITEPAGE_ACTIVATE)
892 /* unlocked. Relock */
895 return (rc < 0) ? -EIO : -EAGAIN;
899 * Default handling if a filesystem does not provide a migration function.
901 static int fallback_migrate_folio(struct address_space *mapping,
902 struct folio *dst, struct folio *src, enum migrate_mode mode)
904 if (folio_test_dirty(src)) {
905 /* Only writeback folios in full synchronous migration */
908 case MIGRATE_SYNC_NO_COPY:
913 return writeout(mapping, src);
917 * Buffers may be managed in a filesystem specific way.
918 * We must have no buffers or drop them.
920 if (folio_test_private(src) &&
921 !filemap_release_folio(src, GFP_KERNEL))
922 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
924 return migrate_folio(mapping, dst, src, mode);
928 * Move a page to a newly allocated page.
929 * The page is locked and all ptes have been successfully removed.
931 * The new page will have replaced the old page if this function is successful.
936 * MIGRATEPAGE_SUCCESS - success
938 static int move_to_new_folio(struct folio *dst, struct folio *src,
939 enum migrate_mode mode)
942 bool is_lru = !__PageMovable(&src->page);
944 VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
945 VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
947 if (likely(is_lru)) {
948 struct address_space *mapping = folio_mapping(src);
951 rc = migrate_folio(mapping, dst, src, mode);
952 else if (mapping->a_ops->migrate_folio)
954 * Most folios have a mapping and most filesystems
955 * provide a migrate_folio callback. Anonymous folios
956 * are part of swap space which also has its own
957 * migrate_folio callback. This is the most common path
958 * for page migration.
960 rc = mapping->a_ops->migrate_folio(mapping, dst, src,
963 rc = fallback_migrate_folio(mapping, dst, src, mode);
965 const struct movable_operations *mops;
968 * In the case of a non-LRU page, it could have been released after
969 * the isolation step. In that case, we shouldn't try migration.
971 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
972 if (!folio_test_movable(src)) {
973 rc = MIGRATEPAGE_SUCCESS;
974 folio_clear_isolated(src);
978 mops = page_movable_ops(&src->page);
979 rc = mops->migrate_page(&dst->page, &src->page, mode);
980 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
981 !folio_test_isolated(src));
985 * When successful, old pagecache src->mapping must be cleared before
986 * src is freed; but stats require that PageAnon be left as PageAnon.
988 if (rc == MIGRATEPAGE_SUCCESS) {
989 if (__PageMovable(&src->page)) {
990 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
993 * We clear PG_movable under the page lock so that no compactor
994 * can try to migrate this page.
996 folio_clear_isolated(src);
1000 * Anonymous and movable src->mapping will be cleared by
1001 * free_pages_prepare so don't reset it here; keeping it lets
1002 * checks on the mapping type, such as PageAnon, keep working.
1004 if (!folio_mapping_flags(src))
1005 src->mapping = NULL;
1007 if (likely(!folio_is_zone_device(dst)))
1008 flush_dcache_folio(dst);
1014 static int __unmap_and_move(struct folio *src, struct folio *dst,
1015 int force, enum migrate_mode mode)
1018 bool page_was_mapped = false;
1019 struct anon_vma *anon_vma = NULL;
1020 bool is_lru = !__PageMovable(&src->page);
1022 if (!folio_trylock(src)) {
1023 if (!force || mode == MIGRATE_ASYNC)
1027 * It's not safe for direct compaction to call lock_page.
1028 * For example, during page readahead pages are added locked
1029 * to the LRU. Later, when the IO completes the pages are
1030 * marked uptodate and unlocked. However, the queueing
1031 * could be merging multiple pages for one bio (e.g.
1032 * mpage_readahead). If an allocation happens for the
1033 * second or third page, the process can end up locking
1034 * the same page twice and deadlocking. Rather than
1035 * trying to be clever about what pages can be locked,
1036 * avoid the use of lock_page for direct compaction altogether.
1039 if (current->flags & PF_MEMALLOC)
1045 if (folio_test_writeback(src)) {
1047 * Only in the case of a full synchronous migration is it
1048 * necessary to wait for PageWriteback. In the async case,
1049 * the retry loop is too short and in the sync-light case,
1050 * the overhead of stalling is too much.
1054 case MIGRATE_SYNC_NO_COPY:
1062 folio_wait_writeback(src);
1066 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
1067 * we cannot notice that anon_vma is freed while we migrate a page.
1068 * This get_anon_vma() delays freeing the anon_vma pointer until the end
1069 * of migration. File cache pages are no problem because of the page lock:
1070 * file caches only use writepage() or lock_page() during migration, so
1071 * only anon pages need this care here.
1073 * Only folio_get_anon_vma() understands the subtleties of
1074 * getting a hold on an anon_vma from outside one of its mms.
1075 * But if we cannot get anon_vma, then we won't need it anyway,
1076 * because that implies that the anon page is no longer mapped
1077 * (and cannot be remapped so long as we hold the page lock).
1079 if (folio_test_anon(src) && !folio_test_ksm(src))
1080 anon_vma = folio_get_anon_vma(src);
1083 * Block others from accessing the new page when we get around to
1084 * establishing additional references. We are usually the only one
1085 * holding a reference to dst at this point. We used to have a BUG
1086 * here if folio_trylock(dst) fails, but would like to allow for
1087 * cases where there might be a race with the previous use of dst.
1088 * This is much like races on refcount of oldpage: just don't BUG().
1090 if (unlikely(!folio_trylock(dst)))
1093 if (unlikely(!is_lru)) {
1094 rc = move_to_new_folio(dst, src, mode);
1095 goto out_unlock_both;
1099 * Corner case handling:
1100 * 1. When a new swap-cache page is read into, it is added to the LRU
1101 * and treated as swapcache but it has no rmap yet.
1102 * Calling try_to_unmap() against a src->mapping==NULL page will
1103 * trigger a BUG. So handle it here.
1104 * 2. An orphaned page (see truncate_cleanup_page) might have
1105 * fs-private metadata. The page can be picked up due to memory
1106 * offlining. Everywhere else except page reclaim, the page is
1107 * invisible to the vm, so the page can not be migrated. So try to
1108 * free the metadata, so the page can be freed.
1110 if (!src->mapping) {
1111 if (folio_test_private(src)) {
1112 try_to_free_buffers(src);
1113 goto out_unlock_both;
1115 } else if (folio_mapped(src)) {
1116 /* Establish migration ptes */
1117 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1118 !folio_test_ksm(src) && !anon_vma, src);
1119 try_to_migrate(src, 0);
1120 page_was_mapped = true;
1123 if (!folio_mapped(src))
1124 rc = move_to_new_folio(dst, src, mode);
1127 * When successful, push dst to LRU immediately: so that if it
1128 * turns out to be an mlocked page, remove_migration_ptes() will
1129 * automatically build up the correct dst->mlock_count for it.
1131 * We would like to do something similar for the old page, when
1132 * unsuccessful, and other cases when a page has been temporarily
1133 * isolated from the unevictable LRU: but this case is the easiest.
1135 if (rc == MIGRATEPAGE_SUCCESS) {
1137 if (page_was_mapped)
1141 if (page_was_mapped)
1142 remove_migration_ptes(src,
1143 rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1148 /* Drop an anon_vma reference if we took one */
1150 put_anon_vma(anon_vma);
1154 * If migration is successful, decrease the refcount of dst,
1155 * which will not free the page because the new page owner increased the refcount.
1158 if (rc == MIGRATEPAGE_SUCCESS)
1165 * Obtain the lock on folio, remove all ptes and migrate the folio
1166 * to the newly allocated folio in dst.
1168 static int unmap_and_move(new_page_t get_new_page,
1169 free_page_t put_new_page,
1170 unsigned long private, struct folio *src,
1171 int force, enum migrate_mode mode,
1172 enum migrate_reason reason,
1173 struct list_head *ret)
1176 int rc = MIGRATEPAGE_SUCCESS;
1177 struct page *newpage = NULL;
1179 if (!thp_migration_supported() && folio_test_transhuge(src))
1182 if (folio_ref_count(src) == 1) {
1183 /* Folio was freed from under us. So we are done. */
1184 folio_clear_active(src);
1185 folio_clear_unevictable(src);
1186 /* free_pages_prepare() will clear PG_isolated. */
1190 newpage = get_new_page(&src->page, private);
1193 dst = page_folio(newpage);
1195 dst->private = NULL;
1196 rc = __unmap_and_move(src, dst, force, mode);
1197 if (rc == MIGRATEPAGE_SUCCESS)
1198 set_page_owner_migrate_reason(&dst->page, reason);
1201 if (rc != -EAGAIN) {
1203 * A folio that has been migrated has all references
1204 * removed and will be freed. A folio that has not been
1205 * migrated will have kept its references and be restored.
1207 list_del(&src->lru);
1211 * If migration is successful, releases the reference grabbed during
1212 * isolation. Otherwise, restore the folio to the right list unless we want to retry.
1215 if (rc == MIGRATEPAGE_SUCCESS) {
1217 * Compaction can also migrate non-LRU folios, which are
1218 * not accounted to NR_ISOLATED_*. They can be recognized
1219 * via __folio_test_movable().
1221 if (likely(!__folio_test_movable(src)))
1222 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1223 folio_is_file_lru(src), -folio_nr_pages(src));
1225 if (reason != MR_MEMORY_FAILURE)
1227 * We release the folio in page_handle_poison.
1232 list_add_tail(&src->lru, ret);
1235 put_new_page(&dst->page, private);
1244 * Counterpart of unmap_and_move() for hugepage migration.
1246 * This function doesn't wait for the completion of hugepage I/O
1247 * because there is no race between I/O and migration for hugepages.
1248 * Note that currently hugepage I/O occurs only in direct I/O
1249 * where no lock is held and PG_writeback is irrelevant,
1250 * and the writeback status of all subpages is counted in the reference
1251 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1252 * under direct I/O, the reference of the head page is 512 and a bit more.)
1253 * This means that when we try to migrate a hugepage whose subpages are
1254 * doing direct I/O, some references remain after try_to_unmap() and
1255 * hugepage migration fails without data corruption.
1257 * There is also no race when direct I/O is issued on the page under migration,
1258 * because then pte is replaced with migration swap entry and direct I/O code
1259 * will wait in the page fault for migration to complete.
1261 static int unmap_and_move_huge_page(new_page_t get_new_page,
1262 free_page_t put_new_page, unsigned long private,
1263 struct page *hpage, int force,
1264 enum migrate_mode mode, int reason,
1265 struct list_head *ret)
1267 struct folio *dst, *src = page_folio(hpage);
1269 int page_was_mapped = 0;
1270 struct page *new_hpage;
1271 struct anon_vma *anon_vma = NULL;
1272 struct address_space *mapping = NULL;
1275 * Migratability of hugepages depends on the architecture and their size.
1276 * This check is necessary because some callers of hugepage migration
1277 * like soft offline and memory hotremove don't walk through page
1278 * tables or check whether the hugepage is pmd-based or not before
1279 * kicking migration.
1281 if (!hugepage_migration_supported(page_hstate(hpage)))
1284 if (folio_ref_count(src) == 1) {
1285 /* page was freed from under us. So we are done. */
1286 putback_active_hugepage(hpage);
1287 return MIGRATEPAGE_SUCCESS;
1290 new_hpage = get_new_page(hpage, private);
1293 dst = page_folio(new_hpage);
1295 if (!folio_trylock(src)) {
1300 case MIGRATE_SYNC_NO_COPY:
1309 * Check for pages which are in the process of being freed. Without
1310 * folio_mapping() set, hugetlbfs specific move page routine will not
1311 * be called and we could leak usage counts for subpools.
1313 if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1318 if (folio_test_anon(src))
1319 anon_vma = folio_get_anon_vma(src);
1321 if (unlikely(!folio_trylock(dst)))
1324 if (folio_mapped(src)) {
1325 enum ttu_flags ttu = 0;
1327 if (!folio_test_anon(src)) {
1329 * In shared mappings, try_to_unmap could potentially
1330 * call huge_pmd_unshare. Because of this, take
1331 * semaphore in write mode here and set TTU_RMAP_LOCKED
1332 * to let lower levels know we have taken the lock.
1334 mapping = hugetlb_page_mapping_lock_write(hpage);
1335 if (unlikely(!mapping))
1336 goto unlock_put_anon;
1338 ttu = TTU_RMAP_LOCKED;
1341 try_to_migrate(src, ttu);
1342 page_was_mapped = 1;
1344 if (ttu & TTU_RMAP_LOCKED)
1345 i_mmap_unlock_write(mapping);
1348 if (!folio_mapped(src))
1349 rc = move_to_new_folio(dst, src, mode);
1351 if (page_was_mapped)
1352 remove_migration_ptes(src,
1353 rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1360 put_anon_vma(anon_vma);
1362 if (rc == MIGRATEPAGE_SUCCESS) {
1363 move_hugetlb_state(src, dst, reason);
1364 put_new_page = NULL;
1370 if (rc == MIGRATEPAGE_SUCCESS)
1371 putback_active_hugepage(hpage);
1372 else if (rc != -EAGAIN)
1373 list_move_tail(&src->lru, ret);
1376 * If migration was not successful and there's a freeing callback, use
1377 * it. Otherwise, put_page() will drop the reference grabbed during isolation.
1381 put_new_page(new_hpage, private);
1383 putback_active_hugepage(new_hpage);
1388 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
1393 rc = split_folio_to_list(folio, split_folios);
1394 folio_unlock(folio);
1396 list_move_tail(&folio->lru, split_folios);
1402 * migrate_pages - migrate the folios specified in a list, to the free folios
1403 * supplied as the target for the page migration
1405 * @from: The list of folios to be migrated.
1406 * @get_new_page: The function used to allocate free folios to be used
1407 * as the target of the folio migration.
1408 * @put_new_page: The function used to free target folios if migration
1409 * fails, or NULL if no special handling is necessary.
1410 * @private: Private data to be passed on to get_new_page()
1411 * @mode: The migration mode that specifies the constraints for
1412 * folio migration, if any.
1413 * @reason: The reason for folio migration.
1414 * @ret_succeeded: Set to the number of folios migrated successfully if
1415 * the caller passes a non-NULL pointer.
1417 * The function returns after 10 attempts or if no folios are movable any more
1418 * because the list has become empty or no retryable folios exist any more.
1419 * It is the caller's responsibility to call putback_movable_pages() to return folios
1420 * to the LRU or free list only if ret != 0.
1422 * Returns the number of {normal folio, large folio, hugetlb} pages that were not
1423 * migrated, or an error code. A split large folio is counted as one
1424 * non-migrated large folio, no matter how many of its split folios are
1425 * migrated successfully.
1427 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1428 free_page_t put_new_page, unsigned long private,
1429 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1432 int large_retry = 1;
1435 int nr_failed_pages = 0;
1436 int nr_retry_pages = 0;
1437 int nr_succeeded = 0;
1438 int nr_thp_succeeded = 0;
1439 int nr_large_failed = 0;
1440 int nr_thp_failed = 0;
1441 int nr_thp_split = 0;
1443 bool is_large = false;
1444 bool is_thp = false;
1445 struct folio *folio, *folio2;
1447 LIST_HEAD(ret_folios);
1448 LIST_HEAD(split_folios);
1449 bool nosplit = (reason == MR_NUMA_MISPLACED);
1450 bool no_split_folio_counting = false;
1452 trace_mm_migrate_pages_start(mode, reason);
1454 split_folio_migration:
1455 for (pass = 0; pass < 10 && (retry || large_retry); pass++) {
1461 list_for_each_entry_safe(folio, folio2, from, lru) {
1463 * Large folio statistics are based on the source large
1464 * folio. Capture required information that might get
1465 * lost during migration.
1467 is_large = folio_test_large(folio) && !folio_test_hugetlb(folio);
1468 is_thp = is_large && folio_test_pmd_mappable(folio);
1469 nr_pages = folio_nr_pages(folio);
1472 if (folio_test_hugetlb(folio))
1473 rc = unmap_and_move_huge_page(get_new_page,
1474 put_new_page, private,
1475 &folio->page, pass > 2, mode,
1479 rc = unmap_and_move(get_new_page, put_new_page,
1480 private, folio, pass > 2, mode,
1481 reason, &ret_folios);
1484 * Success: a non-hugetlb folio will be freed, a hugetlb
1485 * folio will be put back
1486 * -EAGAIN: stay on the from list
1487 * -ENOMEM: stay on the from list
1488 * -ENOSYS: stay on the from list
1489 * Other errno: put on ret_folios list then splice to
1494 * Large folio migration might be unsupported or
1495 * the allocation could've failed, so we should retry
1496 * on the same folio with the large folio split to normal folios.
1499 * Split folios are put in split_folios, and
1500 * we will migrate them after the rest of the
1501 * list is processed.
1504 /* Large folio migration is unsupported */
1507 nr_thp_failed += is_thp;
1508 if (!try_split_folio(folio, &split_folios)) {
1509 nr_thp_split += is_thp;
1512 /* Hugetlb migration is unsupported */
1513 } else if (!no_split_folio_counting) {
1517 nr_failed_pages += nr_pages;
1518 list_move_tail(&folio->lru, &ret_folios);
1522 * When memory is low, don't bother to try to migrate
1523 * other folios, just exit.
1527 nr_thp_failed += is_thp;
1528 /* Large folio NUMA faulting doesn't split the folio to retry. */
1530 int ret = try_split_folio(folio, &split_folios);
1533 nr_thp_split += is_thp;
1535 } else if (reason == MR_LONGTERM_PIN &&
1538 * Try again to split large folio to
1539 * mitigate the failure of longterm pinning.
1542 thp_retry += is_thp;
1543 nr_retry_pages += nr_pages;
1547 } else if (!no_split_folio_counting) {
1551 nr_failed_pages += nr_pages + nr_retry_pages;
1553 * There might be some split folios of fail-to-migrate large
1554 * folios left in the split_folios list. Move them back to the migration
1555 * list so that they can be put back to the right list by
1556 * the caller; otherwise the folio refcount will be leaked.
1558 list_splice_init(&split_folios, from);
1559 /* nr_failed isn't updated for not used */
1560 nr_large_failed += large_retry;
1561 nr_thp_failed += thp_retry;
1566 thp_retry += is_thp;
1567 } else if (!no_split_folio_counting) {
1570 nr_retry_pages += nr_pages;
1572 case MIGRATEPAGE_SUCCESS:
1573 nr_succeeded += nr_pages;
1574 nr_thp_succeeded += is_thp;
1578 * Permanent failure (-EBUSY, etc.):
1579 * unlike -EAGAIN case, the failed folio is
1580 * removed from migration folio list and not
1581 * retried in the next outer loop.
1585 nr_thp_failed += is_thp;
1586 } else if (!no_split_folio_counting) {
1590 nr_failed_pages += nr_pages;
1596 nr_large_failed += large_retry;
1597 nr_thp_failed += thp_retry;
1598 nr_failed_pages += nr_retry_pages;
1600 * Try to migrate split folios of fail-to-migrate large folios, no
1601 * nr_failed counting in this round, since all split folios of a
1602 * large folio are counted as 1 failure in the first round.
1604 if (!list_empty(&split_folios)) {
1606 * Move non-migrated folios (after 10 retries) to ret_folios
1607 * to avoid migrating them again.
1609 list_splice_init(from, &ret_folios);
1610 list_splice_init(&split_folios, from);
1611 no_split_folio_counting = true;
1613 goto split_folio_migration;
1616 rc = nr_failed + nr_large_failed;
1619 * Put the permanently failed folios back on the migration list; they
1620 * will be put back to the right list by the caller.
1622 list_splice(&ret_folios, from);
1625 * Return 0 in case all split folios of fail-to-migrate large folios
1626 * are migrated successfully.
1628 if (list_empty(from))
1631 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1632 count_vm_events(PGMIGRATE_FAIL, nr_failed_pages);
1633 count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
1634 count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
1635 count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
1636 trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded,
1637 nr_thp_failed, nr_thp_split, mode, reason);
1640 *ret_succeeded = nr_succeeded;
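/*
 * Illustrative sketch (not part of this file): the common calling pattern
 * for migrate_pages(), mirroring what do_move_pages_to_node() does further
 * down.  "target_nid" is a placeholder and error handling is trimmed.
 *
 *	LIST_HEAD(pagelist);
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *	};
 *	int err;
 *
 *	// ... isolate folios onto &pagelist ...
 *
 *	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL,
 *			    NULL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */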
1645 struct page *alloc_migration_target(struct page *page, unsigned long private)
1647 struct folio *folio = page_folio(page);
1648 struct migration_target_control *mtc;
1650 unsigned int order = 0;
1651 struct folio *new_folio = NULL;
1655 mtc = (struct migration_target_control *)private;
1656 gfp_mask = mtc->gfp_mask;
1658 if (nid == NUMA_NO_NODE)
1659 nid = folio_nid(folio);
1661 if (folio_test_hugetlb(folio)) {
1662 struct hstate *h = folio_hstate(folio);
1664 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
1665 return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
1668 if (folio_test_large(folio)) {
1670 * clear __GFP_RECLAIM to make the migration callback
1671 * consistent with regular THP allocations.
1673 gfp_mask &= ~__GFP_RECLAIM;
1674 gfp_mask |= GFP_TRANSHUGE;
1675 order = folio_order(folio);
1677 zidx = zone_idx(folio_zone(folio));
1678 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
1679 gfp_mask |= __GFP_HIGHMEM;
1681 new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
1683 return &new_folio->page;
1688 static int store_status(int __user *status, int start, int value, int nr)
1691 if (put_user(value, status + start))
1699 static int do_move_pages_to_node(struct mm_struct *mm,
1700 struct list_head *pagelist, int node)
1703 struct migration_target_control mtc = {
1705 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1708 err = migrate_pages(pagelist, alloc_migration_target, NULL,
1709 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1711 putback_movable_pages(pagelist);
1716 * Resolves the given address to a struct page, isolates it from the LRU and
1717 * puts it to the given pagelist.
1719 * errno - if the page cannot be found/isolated
1720 * 0 - when it doesn't have to be migrated because it is already on the target node
1722 * 1 - when it has been queued
1724 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1725 int node, struct list_head *pagelist, bool migrate_all)
1727 struct vm_area_struct *vma;
1733 vma = vma_lookup(mm, addr);
1734 if (!vma || !vma_migratable(vma))
1737 /* FOLL_DUMP to ignore special (like zero) pages */
1738 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
1740 err = PTR_ERR(page);
1748 if (is_zone_device_page(page))
1752 if (page_to_nid(page) == node)
1756 if (page_mapcount(page) > 1 && !migrate_all)
1759 if (PageHuge(page)) {
1760 if (PageHead(page)) {
1761 err = isolate_hugetlb(page, pagelist);
1768 head = compound_head(page);
1769 err = isolate_lru_page(head);
1774 list_add_tail(&head->lru, pagelist);
1775 mod_node_page_state(page_pgdat(head),
1776 NR_ISOLATED_ANON + page_is_file_lru(head),
1777 thp_nr_pages(head));
1781 * Either remove the duplicate refcount from
1782 * isolate_lru_page() or drop the page ref if it was not isolated.
1787 mmap_read_unlock(mm);
1791 static int move_pages_and_store_status(struct mm_struct *mm, int node,
1792 struct list_head *pagelist, int __user *status,
1793 int start, int i, unsigned long nr_pages)
1797 if (list_empty(pagelist))
1800 err = do_move_pages_to_node(mm, pagelist, node);
1803 * A positive err means the number of pages
1804 * that failed to migrate. Since we are going to
1805 * abort and return the number of non-migrated
1806 * pages, we need to include the rest of the
1807 * nr_pages that have not been attempted as well.
1811 err += nr_pages - i;
1814 return store_status(status, start, node, i - start);
1818 * Migrate an array of page addresses onto an array of nodes and fill
1819 * the corresponding array of status.
1821 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1822 unsigned long nr_pages,
1823 const void __user * __user *pages,
1824 const int __user *nodes,
1825 int __user *status, int flags)
1827 int current_node = NUMA_NO_NODE;
1828 LIST_HEAD(pagelist);
1832 lru_cache_disable();
1834 for (i = start = 0; i < nr_pages; i++) {
1835 const void __user *p;
1840 if (get_user(p, pages + i))
1842 if (get_user(node, nodes + i))
1844 addr = (unsigned long)untagged_addr(p);
1847 if (node < 0 || node >= MAX_NUMNODES)
1849 if (!node_state(node, N_MEMORY))
1853 if (!node_isset(node, task_nodes))
1856 if (current_node == NUMA_NO_NODE) {
1857 current_node = node;
1859 } else if (node != current_node) {
1860 err = move_pages_and_store_status(mm, current_node,
1861 &pagelist, status, start, i, nr_pages);
1865 current_node = node;
1869 * Errors in the page lookup or isolation are not fatal and we simply
1870 * report them via status.
1872 err = add_page_for_migration(mm, addr, current_node,
1873 &pagelist, flags & MPOL_MF_MOVE_ALL);
1876 /* The page is successfully queued for migration */
1881 * The move_pages() man page does not have an -EEXIST choice, so
1882 * use -EFAULT instead.
1888 * If the page is already on the target node (!err), store the
1889 * node, otherwise, store the err.
1891 err = store_status(status, i, err ? : current_node, 1);
1895 err = move_pages_and_store_status(mm, current_node, &pagelist,
1896 status, start, i, nr_pages);
1898 /* We have accounted for page i */
1903 current_node = NUMA_NO_NODE;
1906 /* Make sure we do not overwrite the existing error */
1907 err1 = move_pages_and_store_status(mm, current_node, &pagelist,
1908 status, start, i, nr_pages);
1917 * Determine the nodes of an array of pages and store it in an array of status.
1919 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1920 const void __user **pages, int *status)
1926 for (i = 0; i < nr_pages; i++) {
1927 unsigned long addr = (unsigned long)(*pages);
1928 struct vm_area_struct *vma;
1932 vma = vma_lookup(mm, addr);
1936 /* FOLL_DUMP to ignore special (like zero) pages */
1937 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
1939 err = PTR_ERR(page);
1947 if (!is_zone_device_page(page))
1948 err = page_to_nid(page);
1958 mmap_read_unlock(mm);
1961 static int get_compat_pages_array(const void __user *chunk_pages[],
1962 const void __user * __user *pages,
1963 unsigned long chunk_nr)
1965 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
1969 for (i = 0; i < chunk_nr; i++) {
1970 if (get_user(p, pages32 + i))
1972 chunk_pages[i] = compat_ptr(p);
1979 * Determine the nodes of a user array of pages and store it in
1980 * a user array of status.
1982 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1983 const void __user * __user *pages,
1986 #define DO_PAGES_STAT_CHUNK_NR 16UL
1987 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1988 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1991 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
1993 if (in_compat_syscall()) {
1994 if (get_compat_pages_array(chunk_pages, pages,
1998 if (copy_from_user(chunk_pages, pages,
1999 chunk_nr * sizeof(*chunk_pages)))
2003 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2005 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2010 nr_pages -= chunk_nr;
2012 return nr_pages ? -EFAULT : 0;
2015 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2017 struct task_struct *task;
2018 struct mm_struct *mm;
2021 * There is no need to check if the current process has the right to modify
2022 * the specified process when they are the same.
2026 *mem_nodes = cpuset_mems_allowed(current);
2030 /* Find the mm_struct */
2032 task = find_task_by_vpid(pid);
2035 return ERR_PTR(-ESRCH);
2037 get_task_struct(task);
2040 * Check if this process has the right to modify the specified
2041 * process. Use the regular "ptrace_may_access()" checks.
2043 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2045 mm = ERR_PTR(-EPERM);
2050 mm = ERR_PTR(security_task_movememory(task));
2053 *mem_nodes = cpuset_mems_allowed(task);
2054 mm = get_task_mm(task);
2056 put_task_struct(task);
2058 mm = ERR_PTR(-EINVAL);
2063 * Move a list of pages in the address space of the currently executing process.
2066 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2067 const void __user * __user *pages,
2068 const int __user *nodes,
2069 int __user *status, int flags)
2071 struct mm_struct *mm;
2073 nodemask_t task_nodes;
2076 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2079 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2082 mm = find_mm_struct(pid, &task_nodes);
2087 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2088 nodes, status, flags);
2090 err = do_pages_stat(mm, nr_pages, pages, status);
2096 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2097 const void __user * __user *, pages,
2098 const int __user *, nodes,
2099 int __user *, status, int, flags)
2101 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
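/*
 * Illustrative userspace sketch (not part of this file): the syscall above
 * is normally reached through the move_pages() wrapper declared in
 * <numaif.h>.  "buf" is a hypothetical page-aligned allocation; a pid of 0
 * is assumed to mean the calling process on recent kernels.
 *
 *	#include <numaif.h>
 *
 *	void *pages[1] = { buf };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	// nodes == NULL: only report each page's current node in status[]
 *	move_pages(0, 1, pages, NULL, status, 0);
 *	// ask the kernel to move the page to node 1
 *	move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 */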
2104 #ifdef CONFIG_NUMA_BALANCING
2106 * Returns true if this is a safe migration target node for misplaced NUMA
2107 * pages. Currently it only checks the watermarks, which is crude.
2109 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2110 unsigned long nr_migrate_pages)
2114 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2115 struct zone *zone = pgdat->node_zones + z;
2117 if (!managed_zone(zone))
2120 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
2121 if (!zone_watermark_ok(zone, 0,
2122 high_wmark_pages(zone) +
2131 static struct page *alloc_misplaced_dst_page(struct page *page,
2134 int nid = (int) data;
2135 int order = compound_order(page);
2136 gfp_t gfp = __GFP_THISNODE;
2140 gfp |= GFP_TRANSHUGE_LIGHT;
2142 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2144 gfp &= ~__GFP_RECLAIM;
2146 new = __folio_alloc_node(gfp, order, nid);
2151 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2153 int nr_pages = thp_nr_pages(page);
2154 int order = compound_order(page);
2156 VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
2158 /* Do not migrate THP mapped by multiple processes */
2159 if (PageTransHuge(page) && total_mapcount(page) > 1)
2162 /* Avoid migrating to a node that is nearly full */
2163 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2166 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2168 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2169 if (managed_zone(pgdat->node_zones + z))
2172 wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
2176 if (isolate_lru_page(page))
2179 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
2183 * Isolating the page has taken another reference, so the
2184 * caller's reference can be safely dropped without the page
2185 * disappearing underneath us during migration.
2192 * Attempt to migrate a misplaced page to the specified destination
2193 * node. Caller is expected to have an elevated reference count on
2194 * the page that will be dropped by this function before returning.
2196 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2199 pg_data_t *pgdat = NODE_DATA(node);
2202 unsigned int nr_succeeded;
2203 LIST_HEAD(migratepages);
2204 int nr_pages = thp_nr_pages(page);
2207 * Don't migrate file pages that are mapped in multiple processes
2208 * with execute permissions as they are probably shared libraries.
2210 if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2211 (vma->vm_flags & VM_EXEC))
2215 * Also do not migrate dirty pages, as not all filesystems can move
2216 * dirty pages in MIGRATE_ASYNC mode, which would be a waste of cycles.
2218 if (page_is_file_lru(page) && PageDirty(page))
2221 isolated = numamigrate_isolate_page(pgdat, page);
2225 list_add(&page->lru, &migratepages);
2226 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
2227 NULL, node, MIGRATE_ASYNC,
2228 MR_NUMA_MISPLACED, &nr_succeeded);
2230 if (!list_empty(&migratepages)) {
2231 list_del(&page->lru);
2232 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2233 page_is_file_lru(page), -nr_pages);
2234 putback_lru_page(page);
2239 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2240 if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2241 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2244 BUG_ON(!list_empty(&migratepages));
2251 #endif /* CONFIG_NUMA_BALANCING */
2252 #endif /* CONFIG_NUMA */