mm: migrate: lock buffers before migrate_page_move_mapping()
[linux-2.6-microblaze.git] / mm / migrate.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11  * Hirokazu Takahashi <taka@valinux.co.jp>
12  * Dave Hansen <haveblue@us.ibm.com>
13  * Christoph Lameter
14  */
15
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/nsproxy.h>
24 #include <linux/pagevec.h>
25 #include <linux/ksm.h>
26 #include <linux/rmap.h>
27 #include <linux/topology.h>
28 #include <linux/cpu.h>
29 #include <linux/cpuset.h>
30 #include <linux/writeback.h>
31 #include <linux/mempolicy.h>
32 #include <linux/vmalloc.h>
33 #include <linux/security.h>
34 #include <linux/backing-dev.h>
35 #include <linux/compaction.h>
36 #include <linux/syscalls.h>
37 #include <linux/compat.h>
38 #include <linux/hugetlb.h>
39 #include <linux/hugetlb_cgroup.h>
40 #include <linux/gfp.h>
41 #include <linux/pfn_t.h>
42 #include <linux/memremap.h>
43 #include <linux/userfaultfd_k.h>
44 #include <linux/balloon_compaction.h>
45 #include <linux/mmu_notifier.h>
46 #include <linux/page_idle.h>
47 #include <linux/page_owner.h>
48 #include <linux/sched/mm.h>
49 #include <linux/ptrace.h>
50
51 #include <asm/tlbflush.h>
52
53 #define CREATE_TRACE_POINTS
54 #include <trace/events/migrate.h>
55
56 #include "internal.h"
57
58 /*
59  * migrate_prep() needs to be called before we start compiling a list of pages
60  * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
61  * undesirable, use migrate_prep_local()
62  */
63 int migrate_prep(void)
64 {
65         /*
66          * Clear the LRU lists so pages can be isolated.
67          * Note that pages may be moved off the LRU after we have
68          * drained them. Those pages will fail to migrate like other
69          * pages that may be busy.
70          */
71         lru_add_drain_all();
72
73         return 0;
74 }
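/*
 * Typical caller pattern (illustrative sketch; compare do_pages_move()
 * and do_move_pages_to_node() later in this file):
 *
 *	migrate_prep();
 *	// ... isolate pages onto a private list, e.g. via isolate_lru_page() ...
 *	err = migrate_pages(&pagelist, alloc_new_node_page, NULL, node,
 *			MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */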
75
76 /* Do the necessary work of migrate_prep but not if it involves other CPUs */
77 int migrate_prep_local(void)
78 {
79         lru_add_drain();
80
81         return 0;
82 }
83
84 int isolate_movable_page(struct page *page, isolate_mode_t mode)
85 {
86         struct address_space *mapping;
87
88         /*
89          * Avoid burning cycles on pages that are still under __free_pages(),
90          * or that just got freed under us.
91          *
92          * In case we 'win' a race for a movable page being freed under us and
93          * raise its refcount, preventing __free_pages() from doing its job,
94          * the put_page() at the end of this block will take care of
95          * releasing this page, thus avoiding a nasty leakage.
96          */
97         if (unlikely(!get_page_unless_zero(page)))
98                 goto out;
99
100         /*
101          * Check PageMovable before taking the page lock because the page's
102          * owner assumes that nobody touches the PG_locked bit of a newly
103          * allocated page; unconditionally grabbing the lock would break that.
104          */
105         if (unlikely(!__PageMovable(page)))
106                 goto out_putpage;
107         /*
108          * As movable pages are not isolated from LRU lists, concurrent
109          * compaction threads can race against page migration functions
110          * as well as against a page being released.
111          *
112          * In order to avoid having an already isolated movable page
113          * being (wrongly) re-isolated while it is under migration,
114          * or to avoid attempting to isolate pages being released,
115          * let's be sure we have the page lock
116          * before proceeding with the movable page isolation steps.
117          */
118         if (unlikely(!trylock_page(page)))
119                 goto out_putpage;
120
121         if (!PageMovable(page) || PageIsolated(page))
122                 goto out_no_isolated;
123
124         mapping = page_mapping(page);
125         VM_BUG_ON_PAGE(!mapping, page);
126
127         if (!mapping->a_ops->isolate_page(page, mode))
128                 goto out_no_isolated;
129
130         /* Driver shouldn't use PG_isolated bit of page->flags */
131         WARN_ON_ONCE(PageIsolated(page));
132         __SetPageIsolated(page);
133         unlock_page(page);
134
135         return 0;
136
137 out_no_isolated:
138         unlock_page(page);
139 out_putpage:
140         put_page(page);
141 out:
142         return -EBUSY;
143 }
144
145 /* It should be called on a page which is PG_movable */
146 void putback_movable_page(struct page *page)
147 {
148         struct address_space *mapping;
149
150         VM_BUG_ON_PAGE(!PageLocked(page), page);
151         VM_BUG_ON_PAGE(!PageMovable(page), page);
152         VM_BUG_ON_PAGE(!PageIsolated(page), page);
153
154         mapping = page_mapping(page);
155         mapping->a_ops->putback_page(page);
156         __ClearPageIsolated(page);
157 }
158
159 /*
160  * Put previously isolated pages back onto the appropriate lists
161  * from where they were once taken off for compaction/migration.
162  *
163  * This function shall be used whenever the isolated pageset has been
164  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
165  * and isolate_huge_page().
166  */
167 void putback_movable_pages(struct list_head *l)
168 {
169         struct page *page;
170         struct page *page2;
171
172         list_for_each_entry_safe(page, page2, l, lru) {
173                 if (unlikely(PageHuge(page))) {
174                         putback_active_hugepage(page);
175                         continue;
176                 }
177                 list_del(&page->lru);
178                 /*
179                  * We isolated a non-LRU movable page, so here we can use
180                  * __PageMovable because an LRU page's mapping cannot have
181                  * PAGE_MAPPING_MOVABLE.
182                  */
183                 if (unlikely(__PageMovable(page))) {
184                         VM_BUG_ON_PAGE(!PageIsolated(page), page);
185                         lock_page(page);
186                         if (PageMovable(page))
187                                 putback_movable_page(page);
188                         else
189                                 __ClearPageIsolated(page);
190                         unlock_page(page);
191                         put_page(page);
192                 } else {
193                         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
194                                         page_is_file_cache(page), -hpage_nr_pages(page));
195                         putback_lru_page(page);
196                 }
197         }
198 }
199
200 /*
201  * Restore a potential migration pte to a working pte entry
202  */
203 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
204                                  unsigned long addr, void *old)
205 {
206         struct page_vma_mapped_walk pvmw = {
207                 .page = old,
208                 .vma = vma,
209                 .address = addr,
210                 .flags = PVMW_SYNC | PVMW_MIGRATION,
211         };
212         struct page *new;
213         pte_t pte;
214         swp_entry_t entry;
215
216         VM_BUG_ON_PAGE(PageTail(page), page);
217         while (page_vma_mapped_walk(&pvmw)) {
218                 if (PageKsm(page))
219                         new = page;
220                 else
221                         new = page - pvmw.page->index +
222                                 linear_page_index(vma, pvmw.address);
223
224 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
225                 /* PMD-mapped THP migration entry */
226                 if (!pvmw.pte) {
227                         VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
228                         remove_migration_pmd(&pvmw, new);
229                         continue;
230                 }
231 #endif
232
233                 get_page(new);
234                 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
235                 if (pte_swp_soft_dirty(*pvmw.pte))
236                         pte = pte_mksoft_dirty(pte);
237
238                 /*
239                  * Recheck VMA as permissions can change since migration started
240                  */
241                 entry = pte_to_swp_entry(*pvmw.pte);
242                 if (is_write_migration_entry(entry))
243                         pte = maybe_mkwrite(pte, vma);
244
245                 if (unlikely(is_zone_device_page(new))) {
246                         if (is_device_private_page(new)) {
247                                 entry = make_device_private_entry(new, pte_write(pte));
248                                 pte = swp_entry_to_pte(entry);
249                         } else if (is_device_public_page(new)) {
250                                 pte = pte_mkdevmap(pte);
251                                 flush_dcache_page(new);
252                         }
253                 } else
254                         flush_dcache_page(new);
255
256 #ifdef CONFIG_HUGETLB_PAGE
257                 if (PageHuge(new)) {
258                         pte = pte_mkhuge(pte);
259                         pte = arch_make_huge_pte(pte, vma, new, 0);
260                         set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
261                         if (PageAnon(new))
262                                 hugepage_add_anon_rmap(new, vma, pvmw.address);
263                         else
264                                 page_dup_rmap(new, true);
265                 } else
266 #endif
267                 {
268                         set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
269
270                         if (PageAnon(new))
271                                 page_add_anon_rmap(new, vma, pvmw.address, false);
272                         else
273                                 page_add_file_rmap(new, false);
274                 }
275                 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
276                         mlock_vma_page(new);
277
278                 if (PageTransHuge(page) && PageMlocked(page))
279                         clear_page_mlock(page);
280
281                 /* No need to invalidate - it was non-present before */
282                 update_mmu_cache(vma, pvmw.address, pvmw.pte);
283         }
284
285         return true;
286 }
287
288 /*
289  * Get rid of all migration entries and replace them by
290  * references to the indicated page.
291  */
292 void remove_migration_ptes(struct page *old, struct page *new, bool locked)
293 {
294         struct rmap_walk_control rwc = {
295                 .rmap_one = remove_migration_pte,
296                 .arg = old,
297         };
298
299         if (locked)
300                 rmap_walk_locked(new, &rwc);
301         else
302                 rmap_walk(new, &rwc);
303 }
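/*
 * Note, from the callers in this file: when migration fails, the migration
 * entries are restored by calling remove_migration_ptes(page, page, ...),
 * i.e. with @new == @old, so the original page is simply mapped back in.
 */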
304
305 /*
306  * Something used the pte of a page under migration. We need to
307  * get to the page and wait until migration is finished.
308  * When we return from this function the fault will be retried.
309  */
310 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
311                                 spinlock_t *ptl)
312 {
313         pte_t pte;
314         swp_entry_t entry;
315         struct page *page;
316
317         spin_lock(ptl);
318         pte = *ptep;
319         if (!is_swap_pte(pte))
320                 goto out;
321
322         entry = pte_to_swp_entry(pte);
323         if (!is_migration_entry(entry))
324                 goto out;
325
326         page = migration_entry_to_page(entry);
327
328         /*
329          * Once page cache replacement of page migration started, page_count
330          * is zero; but we must not call put_and_wait_on_page_locked() without
331          * a ref. Use get_page_unless_zero(), and just fault again if it fails.
332          */
333         if (!get_page_unless_zero(page))
334                 goto out;
335         pte_unmap_unlock(ptep, ptl);
336         put_and_wait_on_page_locked(page);
337         return;
338 out:
339         pte_unmap_unlock(ptep, ptl);
340 }
341
342 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
343                                 unsigned long address)
344 {
345         spinlock_t *ptl = pte_lockptr(mm, pmd);
346         pte_t *ptep = pte_offset_map(pmd, address);
347         __migration_entry_wait(mm, ptep, ptl);
348 }
349
350 void migration_entry_wait_huge(struct vm_area_struct *vma,
351                 struct mm_struct *mm, pte_t *pte)
352 {
353         spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
354         __migration_entry_wait(mm, pte, ptl);
355 }
356
357 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
358 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
359 {
360         spinlock_t *ptl;
361         struct page *page;
362
363         ptl = pmd_lock(mm, pmd);
364         if (!is_pmd_migration_entry(*pmd))
365                 goto unlock;
366         page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
367         if (!get_page_unless_zero(page))
368                 goto unlock;
369         spin_unlock(ptl);
370         put_and_wait_on_page_locked(page);
371         return;
372 unlock:
373         spin_unlock(ptl);
374 }
375 #endif
376
377 #ifdef CONFIG_BLOCK
378 /* Returns true if all buffers are successfully locked */
379 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
380                                                         enum migrate_mode mode)
381 {
382         struct buffer_head *bh = head;
383
384         /* Simple case, sync compaction */
385         if (mode != MIGRATE_ASYNC) {
386                 do {
387                         get_bh(bh);
388                         lock_buffer(bh);
389                         bh = bh->b_this_page;
390
391                 } while (bh != head);
392
393                 return true;
394         }
395
396         /* async case, we cannot block on lock_buffer so use trylock_buffer */
397         do {
398                 get_bh(bh);
399                 if (!trylock_buffer(bh)) {
400                         /*
401                          * We failed to lock the buffer and cannot stall in
402                          * async migration. Release the taken locks
403                          */
404                         struct buffer_head *failed_bh = bh;
405                         put_bh(failed_bh);
406                         bh = head;
407                         while (bh != failed_bh) {
408                                 unlock_buffer(bh);
409                                 put_bh(bh);
410                                 bh = bh->b_this_page;
411                         }
412                         return false;
413                 }
414
415                 bh = bh->b_this_page;
416         } while (bh != head);
417         return true;
418 }
419 #else
420 static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
421                                                         enum migrate_mode mode)
422 {
423         return true;
424 }
425 #endif /* CONFIG_BLOCK */
426
427 static int expected_page_refs(struct page *page)
428 {
429         int expected_count = 1;
430
431         /*
432          * Device public or private pages have an extra refcount as they are
433          * ZONE_DEVICE pages.
434          */
435         expected_count += is_device_private_page(page);
436         expected_count += is_device_public_page(page);
437         if (page_mapping(page))
438                 expected_count += hpage_nr_pages(page) + page_has_private(page);
439
440         return expected_count;
441 }
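/*
 * Worked example (illustrative): a base page-cache page with buffer heads
 * attached has expected_page_refs() = 1 (caller) + 1 (page cache)
 * + 1 (PagePrivate) = 3, matching the "3 for pages with a mapping and
 * PagePrivate/PagePrivate2 set" rule documented above
 * migrate_page_move_mapping() below.
 */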
442
443 /*
444  * Replace the page in the mapping.
445  *
446  * The number of remaining references must be:
447  * 1 for anonymous pages without a mapping
448  * 2 for pages with a mapping
449  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
450  */
451 int migrate_page_move_mapping(struct address_space *mapping,
452                 struct page *newpage, struct page *page,
453                 struct buffer_head *head, enum migrate_mode mode,
454                 int extra_count)
455 {
456         XA_STATE(xas, &mapping->i_pages, page_index(page));
457         struct zone *oldzone, *newzone;
458         int dirty;
459         int expected_count = expected_page_refs(page) + extra_count;
460
461         if (!mapping) {
462                 /* Anonymous page without mapping */
463                 if (page_count(page) != expected_count)
464                         return -EAGAIN;
465
466                 /* No turning back from here */
467                 newpage->index = page->index;
468                 newpage->mapping = page->mapping;
469                 if (PageSwapBacked(page))
470                         __SetPageSwapBacked(newpage);
471
472                 return MIGRATEPAGE_SUCCESS;
473         }
474
475         oldzone = page_zone(page);
476         newzone = page_zone(newpage);
477
478         xas_lock_irq(&xas);
479         if (page_count(page) != expected_count || xas_load(&xas) != page) {
480                 xas_unlock_irq(&xas);
481                 return -EAGAIN;
482         }
483
484         if (!page_ref_freeze(page, expected_count)) {
485                 xas_unlock_irq(&xas);
486                 return -EAGAIN;
487         }
488
489         /*
490          * Now we know that no one else is looking at the page:
491          * no turning back from here.
492          */
493         newpage->index = page->index;
494         newpage->mapping = page->mapping;
495         page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
496         if (PageSwapBacked(page)) {
497                 __SetPageSwapBacked(newpage);
498                 if (PageSwapCache(page)) {
499                         SetPageSwapCache(newpage);
500                         set_page_private(newpage, page_private(page));
501                 }
502         } else {
503                 VM_BUG_ON_PAGE(PageSwapCache(page), page);
504         }
505
506         /* Move dirty while page refs frozen and newpage not yet exposed */
507         dirty = PageDirty(page);
508         if (dirty) {
509                 ClearPageDirty(page);
510                 SetPageDirty(newpage);
511         }
512
513         xas_store(&xas, newpage);
514         if (PageTransHuge(page)) {
515                 int i;
516
517                 for (i = 1; i < HPAGE_PMD_NR; i++) {
518                         xas_next(&xas);
519                         xas_store(&xas, newpage + i);
520                 }
521         }
522
523         /*
524          * Drop cache reference from old page by unfreezing
525          * to one less reference.
526          * We know this isn't the last reference.
527          */
528         page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
529
530         xas_unlock(&xas);
531         /* Leave irq disabled to prevent preemption while updating stats */
532
533         /*
534          * If moved to a different zone then also account
535          * the page for that zone. Other VM counters will be
536          * taken care of when we establish references to the
537          * new page and drop references to the old page.
538          *
539          * Note that anonymous pages are accounted for
540          * via NR_FILE_PAGES and NR_ANON_MAPPED if they
541          * are mapped to swap space.
542          */
543         if (newzone != oldzone) {
544                 __dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
545                 __inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
546                 if (PageSwapBacked(page) && !PageSwapCache(page)) {
547                         __dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
548                         __inc_node_state(newzone->zone_pgdat, NR_SHMEM);
549                 }
550                 if (dirty && mapping_cap_account_dirty(mapping)) {
551                         __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
552                         __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
553                         __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
554                         __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
555                 }
556         }
557         local_irq_enable();
558
559         return MIGRATEPAGE_SUCCESS;
560 }
561 EXPORT_SYMBOL(migrate_page_move_mapping);
562
563 /*
564  * The expected number of remaining references is the same as that
565  * of migrate_page_move_mapping().
566  */
567 int migrate_huge_page_move_mapping(struct address_space *mapping,
568                                    struct page *newpage, struct page *page)
569 {
570         XA_STATE(xas, &mapping->i_pages, page_index(page));
571         int expected_count;
572
573         xas_lock_irq(&xas);
574         expected_count = 2 + page_has_private(page);
575         if (page_count(page) != expected_count || xas_load(&xas) != page) {
576                 xas_unlock_irq(&xas);
577                 return -EAGAIN;
578         }
579
580         if (!page_ref_freeze(page, expected_count)) {
581                 xas_unlock_irq(&xas);
582                 return -EAGAIN;
583         }
584
585         newpage->index = page->index;
586         newpage->mapping = page->mapping;
587
588         get_page(newpage);
589
590         xas_store(&xas, newpage);
591
592         page_ref_unfreeze(page, expected_count - 1);
593
594         xas_unlock_irq(&xas);
595
596         return MIGRATEPAGE_SUCCESS;
597 }
598
599 /*
600  * Gigantic pages are so large that we do not guarantee that page++ pointer
601  * arithmetic will work across the entire page.  We need something more
602  * specialized.
603  */
604 static void __copy_gigantic_page(struct page *dst, struct page *src,
605                                 int nr_pages)
606 {
607         int i;
608         struct page *dst_base = dst;
609         struct page *src_base = src;
610
611         for (i = 0; i < nr_pages; ) {
612                 cond_resched();
613                 copy_highpage(dst, src);
614
615                 i++;
616                 dst = mem_map_next(dst, dst_base, i);
617                 src = mem_map_next(src, src_base, i);
618         }
619 }
620
621 static void copy_huge_page(struct page *dst, struct page *src)
622 {
623         int i;
624         int nr_pages;
625
626         if (PageHuge(src)) {
627                 /* hugetlbfs page */
628                 struct hstate *h = page_hstate(src);
629                 nr_pages = pages_per_huge_page(h);
630
631                 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
632                         __copy_gigantic_page(dst, src, nr_pages);
633                         return;
634                 }
635         } else {
636                 /* thp page */
637                 BUG_ON(!PageTransHuge(src));
638                 nr_pages = hpage_nr_pages(src);
639         }
640
641         for (i = 0; i < nr_pages; i++) {
642                 cond_resched();
643                 copy_highpage(dst + i, src + i);
644         }
645 }
646
647 /*
648  * Copy the page to its new location
649  */
650 void migrate_page_states(struct page *newpage, struct page *page)
651 {
652         int cpupid;
653
654         if (PageError(page))
655                 SetPageError(newpage);
656         if (PageReferenced(page))
657                 SetPageReferenced(newpage);
658         if (PageUptodate(page))
659                 SetPageUptodate(newpage);
660         if (TestClearPageActive(page)) {
661                 VM_BUG_ON_PAGE(PageUnevictable(page), page);
662                 SetPageActive(newpage);
663         } else if (TestClearPageUnevictable(page))
664                 SetPageUnevictable(newpage);
665         if (PageWorkingset(page))
666                 SetPageWorkingset(newpage);
667         if (PageChecked(page))
668                 SetPageChecked(newpage);
669         if (PageMappedToDisk(page))
670                 SetPageMappedToDisk(newpage);
671
672         /* Move dirty on pages not done by migrate_page_move_mapping() */
673         if (PageDirty(page))
674                 SetPageDirty(newpage);
675
676         if (page_is_young(page))
677                 set_page_young(newpage);
678         if (page_is_idle(page))
679                 set_page_idle(newpage);
680
681         /*
682          * Copy NUMA information to the new page, to prevent over-eager
683          * future migrations of this same page.
684          */
685         cpupid = page_cpupid_xchg_last(page, -1);
686         page_cpupid_xchg_last(newpage, cpupid);
687
688         ksm_migrate_page(newpage, page);
689         /*
690          * Please do not reorder this without considering how mm/ksm.c's
691          * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
692          */
693         if (PageSwapCache(page))
694                 ClearPageSwapCache(page);
695         ClearPagePrivate(page);
696         set_page_private(page, 0);
697
698         /*
699          * If any waiters have accumulated on the new page then
700          * wake them up.
701          */
702         if (PageWriteback(newpage))
703                 end_page_writeback(newpage);
704
705         copy_page_owner(page, newpage);
706
707         mem_cgroup_migrate(page, newpage);
708 }
709 EXPORT_SYMBOL(migrate_page_states);
710
711 void migrate_page_copy(struct page *newpage, struct page *page)
712 {
713         if (PageHuge(page) || PageTransHuge(page))
714                 copy_huge_page(newpage, page);
715         else
716                 copy_highpage(newpage, page);
717
718         migrate_page_states(newpage, page);
719 }
720 EXPORT_SYMBOL(migrate_page_copy);
721
722 /************************************************************
723  *                    Migration functions
724  ***********************************************************/
725
726 /*
727  * Common logic to directly migrate a single LRU page suitable for
728  * pages that do not use PagePrivate/PagePrivate2.
729  *
730  * Pages are locked upon entry and exit.
731  */
732 int migrate_page(struct address_space *mapping,
733                 struct page *newpage, struct page *page,
734                 enum migrate_mode mode)
735 {
736         int rc;
737
738         BUG_ON(PageWriteback(page));    /* Writeback must be complete */
739
740         rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
741
742         if (rc != MIGRATEPAGE_SUCCESS)
743                 return rc;
744
745         if (mode != MIGRATE_SYNC_NO_COPY)
746                 migrate_page_copy(newpage, page);
747         else
748                 migrate_page_states(newpage, page);
749         return MIGRATEPAGE_SUCCESS;
750 }
751 EXPORT_SYMBOL(migrate_page);
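/*
 * Illustrative, hypothetical wiring (not taken from any particular
 * filesystem): an address_space whose pages never carry PagePrivate data
 * can typically point its ->migratepage handler straight at migrate_page():
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 *
 * Filesystems that attach buffer heads use buffer_migrate_page() below
 * instead.
 */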
752
753 #ifdef CONFIG_BLOCK
754 /*
755  * Migration function for pages with buffers. This function can only be used
756  * if the underlying filesystem guarantees that no other references to "page"
757  * exist.
758  */
759 int buffer_migrate_page(struct address_space *mapping,
760                 struct page *newpage, struct page *page, enum migrate_mode mode)
761 {
762         struct buffer_head *bh, *head;
763         int rc;
764         int expected_count;
765
766         if (!page_has_buffers(page))
767                 return migrate_page(mapping, newpage, page, mode);
768
769         /* Check whether page does not have extra refs before we do more work */
770         expected_count = expected_page_refs(page);
771         if (page_count(page) != expected_count)
772                 return -EAGAIN;
773
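	/*
	 * As per the patch subject at the top of this listing, the buffers are
	 * locked here, before migrate_page_move_mapping() is called, keeping
	 * all buffer_head handling inside buffer_migrate_page() (note that
	 * both callers now pass a NULL buffer_head list to
	 * migrate_page_move_mapping()).
	 */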
774         head = page_buffers(page);
775         if (!buffer_migrate_lock_buffers(head, mode))
776                 return -EAGAIN;
777
778         rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
779         if (rc != MIGRATEPAGE_SUCCESS)
780                 goto unlock_buffers;
781
782         ClearPagePrivate(page);
783         set_page_private(newpage, page_private(page));
784         set_page_private(page, 0);
785         put_page(page);
786         get_page(newpage);
787
788         bh = head;
789         do {
790                 set_bh_page(bh, newpage, bh_offset(bh));
791                 bh = bh->b_this_page;
792
793         } while (bh != head);
794
795         SetPagePrivate(newpage);
796
797         if (mode != MIGRATE_SYNC_NO_COPY)
798                 migrate_page_copy(newpage, page);
799         else
800                 migrate_page_states(newpage, page);
801
802         rc = MIGRATEPAGE_SUCCESS;
803 unlock_buffers:
804         bh = head;
805         do {
806                 unlock_buffer(bh);
807                 put_bh(bh);
808                 bh = bh->b_this_page;
809
810         } while (bh != head);
811
812         return rc;
813 }
814 EXPORT_SYMBOL(buffer_migrate_page);
815 #endif
816
817 /*
818  * Writeback a page to clean the dirty state
819  */
820 static int writeout(struct address_space *mapping, struct page *page)
821 {
822         struct writeback_control wbc = {
823                 .sync_mode = WB_SYNC_NONE,
824                 .nr_to_write = 1,
825                 .range_start = 0,
826                 .range_end = LLONG_MAX,
827                 .for_reclaim = 1
828         };
829         int rc;
830
831         if (!mapping->a_ops->writepage)
832                 /* No write method for the address space */
833                 return -EINVAL;
834
835         if (!clear_page_dirty_for_io(page))
836                 /* Someone else already triggered a write */
837                 return -EAGAIN;
838
839         /*
840          * A dirty page may imply that the underlying filesystem has
841          * the page on some queue. So the page must be clean for
842          * migration. Writeout may mean we lose the lock and the
843          * page state is no longer what we checked for earlier.
844          * At this point we know that the migration attempt cannot
845          * be successful.
846          */
847         remove_migration_ptes(page, page, false);
848
849         rc = mapping->a_ops->writepage(page, &wbc);
850
851         if (rc != AOP_WRITEPAGE_ACTIVATE)
852                 /* unlocked. Relock */
853                 lock_page(page);
854
855         return (rc < 0) ? -EIO : -EAGAIN;
856 }
857
858 /*
859  * Default handling if a filesystem does not provide a migration function.
860  */
861 static int fallback_migrate_page(struct address_space *mapping,
862         struct page *newpage, struct page *page, enum migrate_mode mode)
863 {
864         if (PageDirty(page)) {
865                 /* Only writeback pages in full synchronous migration */
866                 switch (mode) {
867                 case MIGRATE_SYNC:
868                 case MIGRATE_SYNC_NO_COPY:
869                         break;
870                 default:
871                         return -EBUSY;
872                 }
873                 return writeout(mapping, page);
874         }
875
876         /*
877          * Buffers may be managed in a filesystem specific way.
878          * We must have no buffers or drop them.
879          */
880         if (page_has_private(page) &&
881             !try_to_release_page(page, GFP_KERNEL))
882                 return -EAGAIN;
883
884         return migrate_page(mapping, newpage, page, mode);
885 }
886
887 /*
888  * Move a page to a newly allocated page.
889  * The page is locked and all ptes have been successfully removed.
890  *
891  * The new page will have replaced the old page if this function
892  * is successful.
893  *
894  * Return value:
895  *   < 0 - error code
896  *  MIGRATEPAGE_SUCCESS - success
897  */
898 static int move_to_new_page(struct page *newpage, struct page *page,
899                                 enum migrate_mode mode)
900 {
901         struct address_space *mapping;
902         int rc = -EAGAIN;
903         bool is_lru = !__PageMovable(page);
904
905         VM_BUG_ON_PAGE(!PageLocked(page), page);
906         VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
907
908         mapping = page_mapping(page);
909
910         if (likely(is_lru)) {
911                 if (!mapping)
912                         rc = migrate_page(mapping, newpage, page, mode);
913                 else if (mapping->a_ops->migratepage)
914                         /*
915                          * Most pages have a mapping and most filesystems
916                          * provide a migratepage callback. Anonymous pages
917                          * are part of swap space which also has its own
918                          * migratepage callback. This is the most common path
919                          * for page migration.
920                          */
921                         rc = mapping->a_ops->migratepage(mapping, newpage,
922                                                         page, mode);
923                 else
924                         rc = fallback_migrate_page(mapping, newpage,
925                                                         page, mode);
926         } else {
927                 /*
928                  * In the case of a non-LRU page, it could have been released
929                  * after the isolation step. In that case, don't try to migrate it.
930                  */
931                 VM_BUG_ON_PAGE(!PageIsolated(page), page);
932                 if (!PageMovable(page)) {
933                         rc = MIGRATEPAGE_SUCCESS;
934                         __ClearPageIsolated(page);
935                         goto out;
936                 }
937
938                 rc = mapping->a_ops->migratepage(mapping, newpage,
939                                                 page, mode);
940                 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
941                         !PageIsolated(page));
942         }
943
944         /*
945          * When successful, old pagecache page->mapping must be cleared before
946          * page is freed; but stats require that PageAnon be left as PageAnon.
947          */
948         if (rc == MIGRATEPAGE_SUCCESS) {
949                 if (__PageMovable(page)) {
950                         VM_BUG_ON_PAGE(!PageIsolated(page), page);
951
952                         /*
953                          * We clear PG_movable under page_lock so any compactor
954                          * cannot try to migrate this page.
955                          */
956                         __ClearPageIsolated(page);
957                 }
958
959                 /*
960                  * Anonymous and movable page->mapping will be cleared by
961                  * free_pages_prepare(), so don't reset it here; keeping it
962                  * preserves type checks such as PageAnon().
963                  */
964                 if (!PageMappingFlags(page))
965                         page->mapping = NULL;
966         }
967 out:
968         return rc;
969 }
970
971 static int __unmap_and_move(struct page *page, struct page *newpage,
972                                 int force, enum migrate_mode mode)
973 {
974         int rc = -EAGAIN;
975         int page_was_mapped = 0;
976         struct anon_vma *anon_vma = NULL;
977         bool is_lru = !__PageMovable(page);
978
979         if (!trylock_page(page)) {
980                 if (!force || mode == MIGRATE_ASYNC)
981                         goto out;
982
983                 /*
984                  * It's not safe for direct compaction to call lock_page.
985                  * For example, during page readahead pages are added locked
986                  * to the LRU. Later, when the IO completes the pages are
987                  * marked uptodate and unlocked. However, the queueing
988                  * could be merging multiple pages for one bio (e.g.
989                  * mpage_readpages). If an allocation happens for the
990                  * second or third page, the process can end up locking
991                  * the same page twice and deadlocking. Rather than
992                  * trying to be clever about what pages can be locked,
993                  * avoid the use of lock_page for direct compaction
994                  * altogether.
995                  */
996                 if (current->flags & PF_MEMALLOC)
997                         goto out;
998
999                 lock_page(page);
1000         }
1001
1002         if (PageWriteback(page)) {
1003                 /*
1004                  * Only in the case of a full synchronous migration is it
1005                  * necessary to wait for PageWriteback. In the async case,
1006                  * the retry loop is too short and in the sync-light case,
1007                  * the overhead of stalling is too much
1008                  */
1009                 switch (mode) {
1010                 case MIGRATE_SYNC:
1011                 case MIGRATE_SYNC_NO_COPY:
1012                         break;
1013                 default:
1014                         rc = -EBUSY;
1015                         goto out_unlock;
1016                 }
1017                 if (!force)
1018                         goto out_unlock;
1019                 wait_on_page_writeback(page);
1020         }
1021
1022         /*
1023          * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
1024          * we cannot notice that the anon_vma is freed while we migrate a page.
1025          * This get_anon_vma() delays freeing the anon_vma pointer until the end
1026          * of migration. File cache pages are no problem because of page_lock():
1027          * file caches may use writepage() or lock_page() during migration, so
1028          * only anonymous pages need this care here.
1029          *
1030          * Only page_get_anon_vma() understands the subtleties of
1031          * getting a hold on an anon_vma from outside one of its mms.
1032          * But if we cannot get anon_vma, then we won't need it anyway,
1033          * because that implies that the anon page is no longer mapped
1034          * (and cannot be remapped so long as we hold the page lock).
1035          */
1036         if (PageAnon(page) && !PageKsm(page))
1037                 anon_vma = page_get_anon_vma(page);
1038
1039         /*
1040          * Block others from accessing the new page when we get around to
1041          * establishing additional references. We are usually the only one
1042          * holding a reference to newpage at this point. We used to have a BUG
1043          * here if trylock_page(newpage) fails, but would like to allow for
1044          * cases where there might be a race with the previous use of newpage.
1045          * This is much like races on refcount of oldpage: just don't BUG().
1046          */
1047         if (unlikely(!trylock_page(newpage)))
1048                 goto out_unlock;
1049
1050         if (unlikely(!is_lru)) {
1051                 rc = move_to_new_page(newpage, page, mode);
1052                 goto out_unlock_both;
1053         }
1054
1055         /*
1056          * Corner case handling:
1057          * 1. When a new swap-cache page is read in, it is added to the LRU
1058          * and treated as swapcache but it has no rmap yet.
1059          * Calling try_to_unmap() against a page->mapping==NULL page will
1060          * trigger a BUG.  So handle it here.
1061          * 2. An orphaned page (see truncate_complete_page) might have
1062          * fs-private metadata. The page can be picked up due to memory
1063          * offlining.  Everywhere else except page reclaim, the page is
1064          * invisible to the VM, so the page cannot be migrated.  So try to
1065          * free the metadata so that the page can be freed.
1066          */
1067         if (!page->mapping) {
1068                 VM_BUG_ON_PAGE(PageAnon(page), page);
1069                 if (page_has_private(page)) {
1070                         try_to_free_buffers(page);
1071                         goto out_unlock_both;
1072                 }
1073         } else if (page_mapped(page)) {
1074                 /* Establish migration ptes */
1075                 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1076                                 page);
1077                 try_to_unmap(page,
1078                         TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1079                 page_was_mapped = 1;
1080         }
1081
1082         if (!page_mapped(page))
1083                 rc = move_to_new_page(newpage, page, mode);
1084
1085         if (page_was_mapped)
1086                 remove_migration_ptes(page,
1087                         rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1088
1089 out_unlock_both:
1090         unlock_page(newpage);
1091 out_unlock:
1092         /* Drop an anon_vma reference if we took one */
1093         if (anon_vma)
1094                 put_anon_vma(anon_vma);
1095         unlock_page(page);
1096 out:
1097         /*
1098          * If migration was successful, drop the refcount on the newpage;
1099          * this will not free the page because the new page owner has taken
1100          * its own reference. Also, if it is an LRU page, add it to the LRU
1101          * list here.
1102          */
1103         if (rc == MIGRATEPAGE_SUCCESS) {
1104                 if (unlikely(__PageMovable(newpage)))
1105                         put_page(newpage);
1106                 else
1107                         putback_lru_page(newpage);
1108         }
1109
1110         return rc;
1111 }
1112
1113 /*
1114  * gcc 4.7 and 4.8 on arm get ICEs when inlining unmap_and_move().  Work
1115  * around it.
1116  */
1117 #if defined(CONFIG_ARM) && \
1118         defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700
1119 #define ICE_noinline noinline
1120 #else
1121 #define ICE_noinline
1122 #endif
1123
1124 /*
1125  * Obtain the lock on page, remove all ptes and migrate the page
1126  * to the newly allocated page in newpage.
1127  */
1128 static ICE_noinline int unmap_and_move(new_page_t get_new_page,
1129                                    free_page_t put_new_page,
1130                                    unsigned long private, struct page *page,
1131                                    int force, enum migrate_mode mode,
1132                                    enum migrate_reason reason)
1133 {
1134         int rc = MIGRATEPAGE_SUCCESS;
1135         struct page *newpage;
1136
1137         if (!thp_migration_supported() && PageTransHuge(page))
1138                 return -ENOMEM;
1139
1140         newpage = get_new_page(page, private);
1141         if (!newpage)
1142                 return -ENOMEM;
1143
1144         if (page_count(page) == 1) {
1145                 /* page was freed from under us. So we are done. */
1146                 ClearPageActive(page);
1147                 ClearPageUnevictable(page);
1148                 if (unlikely(__PageMovable(page))) {
1149                         lock_page(page);
1150                         if (!PageMovable(page))
1151                                 __ClearPageIsolated(page);
1152                         unlock_page(page);
1153                 }
1154                 if (put_new_page)
1155                         put_new_page(newpage, private);
1156                 else
1157                         put_page(newpage);
1158                 goto out;
1159         }
1160
1161         rc = __unmap_and_move(page, newpage, force, mode);
1162         if (rc == MIGRATEPAGE_SUCCESS)
1163                 set_page_owner_migrate_reason(newpage, reason);
1164
1165 out:
1166         if (rc != -EAGAIN) {
1167                 /*
1168                  * A page that has been migrated has all references
1169                  * removed and will be freed. A page that has not been
1170                  * migrated will have kept its references and be
1171                  * restored.
1172                  */
1173                 list_del(&page->lru);
1174
1175                 /*
1176                  * Compaction can also migrate non-LRU pages which are
1177                  * not accounted to NR_ISOLATED_*. They can be recognized
1178                  * as __PageMovable
1179                  */
1180                 if (likely(!__PageMovable(page)))
1181                         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1182                                         page_is_file_cache(page), -hpage_nr_pages(page));
1183         }
1184
1185         /*
1186          * If migration was successful, release the reference grabbed during
1187          * isolation. Otherwise, restore the page to the right list unless
1188          * we want to retry.
1189          */
1190         if (rc == MIGRATEPAGE_SUCCESS) {
1191                 put_page(page);
1192                 if (reason == MR_MEMORY_FAILURE) {
1193                         /*
1194                          * Set PG_HWPoison on just freed page
1195                          * intentionally. Although it's rather weird,
1196                          * it's how HWPoison flag works at the moment.
1197                          */
1198                         if (set_hwpoison_free_buddy_page(page))
1199                                 num_poisoned_pages_inc();
1200                 }
1201         } else {
1202                 if (rc != -EAGAIN) {
1203                         if (likely(!__PageMovable(page))) {
1204                                 putback_lru_page(page);
1205                                 goto put_new;
1206                         }
1207
1208                         lock_page(page);
1209                         if (PageMovable(page))
1210                                 putback_movable_page(page);
1211                         else
1212                                 __ClearPageIsolated(page);
1213                         unlock_page(page);
1214                         put_page(page);
1215                 }
1216 put_new:
1217                 if (put_new_page)
1218                         put_new_page(newpage, private);
1219                 else
1220                         put_page(newpage);
1221         }
1222
1223         return rc;
1224 }
1225
1226 /*
1227  * Counterpart of unmap_and_move() for hugepage migration.
1228  *
1229  * This function doesn't wait for the completion of hugepage I/O
1230  * because there is no race between I/O and migration for hugepages.
1231  * Note that currently hugepage I/O occurs only in direct I/O
1232  * where no lock is held and PG_writeback is irrelevant,
1233  * and the writeback status of all subpages is counted in the reference
1234  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1235  * under direct I/O, the reference count of the head page is 512 and a bit more.)
1236  * This means that when we try to migrate a hugepage whose subpages are
1237  * doing direct I/O, some references remain after try_to_unmap() and
1238  * hugepage migration fails without data corruption.
1239  *
1240  * There is also no race when direct I/O is issued on the page under migration,
1241  * because then pte is replaced with migration swap entry and direct I/O code
1242  * will wait in the page fault for migration to complete.
1243  */
1244 static int unmap_and_move_huge_page(new_page_t get_new_page,
1245                                 free_page_t put_new_page, unsigned long private,
1246                                 struct page *hpage, int force,
1247                                 enum migrate_mode mode, int reason)
1248 {
1249         int rc = -EAGAIN;
1250         int page_was_mapped = 0;
1251         struct page *new_hpage;
1252         struct anon_vma *anon_vma = NULL;
1253
1254         /*
1255          * Movability of hugepages depends on the architecture and the hugepage size.
1256          * This check is necessary because some callers of hugepage migration
1257          * like soft offline and memory hotremove don't walk through page
1258          * tables or check whether the hugepage is pmd-based or not before
1259          * kicking migration.
1260          */
1261         if (!hugepage_migration_supported(page_hstate(hpage))) {
1262                 putback_active_hugepage(hpage);
1263                 return -ENOSYS;
1264         }
1265
1266         new_hpage = get_new_page(hpage, private);
1267         if (!new_hpage)
1268                 return -ENOMEM;
1269
1270         if (!trylock_page(hpage)) {
1271                 if (!force)
1272                         goto out;
1273                 switch (mode) {
1274                 case MIGRATE_SYNC:
1275                 case MIGRATE_SYNC_NO_COPY:
1276                         break;
1277                 default:
1278                         goto out;
1279                 }
1280                 lock_page(hpage);
1281         }
1282
1283         if (PageAnon(hpage))
1284                 anon_vma = page_get_anon_vma(hpage);
1285
1286         if (unlikely(!trylock_page(new_hpage)))
1287                 goto put_anon;
1288
1289         if (page_mapped(hpage)) {
1290                 try_to_unmap(hpage,
1291                         TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1292                 page_was_mapped = 1;
1293         }
1294
1295         if (!page_mapped(hpage))
1296                 rc = move_to_new_page(new_hpage, hpage, mode);
1297
1298         if (page_was_mapped)
1299                 remove_migration_ptes(hpage,
1300                         rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
1301
1302         unlock_page(new_hpage);
1303
1304 put_anon:
1305         if (anon_vma)
1306                 put_anon_vma(anon_vma);
1307
1308         if (rc == MIGRATEPAGE_SUCCESS) {
1309                 move_hugetlb_state(hpage, new_hpage, reason);
1310                 put_new_page = NULL;
1311         }
1312
1313         unlock_page(hpage);
1314 out:
1315         if (rc != -EAGAIN)
1316                 putback_active_hugepage(hpage);
1317
1318         /*
1319          * If migration was not successful and there's a freeing callback, use
1320          * it.  Otherwise, put_page() will drop the reference grabbed during
1321          * isolation.
1322          */
1323         if (put_new_page)
1324                 put_new_page(new_hpage, private);
1325         else
1326                 putback_active_hugepage(new_hpage);
1327
1328         return rc;
1329 }
1330
1331 /*
1332  * migrate_pages - migrate the pages specified in a list, to the free pages
1333  *                 supplied as the target for the page migration
1334  *
1335  * @from:               The list of pages to be migrated.
1336  * @get_new_page:       The function used to allocate free pages to be used
1337  *                      as the target of the page migration.
1338  * @put_new_page:       The function used to free target pages if migration
1339  *                      fails, or NULL if no special handling is necessary.
1340  * @private:            Private data to be passed on to get_new_page()
1341  * @mode:               The migration mode that specifies the constraints for
1342  *                      page migration, if any.
1343  * @reason:             The reason for page migration.
1344  *
1345  * The function returns after 10 attempts or if no pages are movable any more
1346  * because the list has become empty or no retryable pages remain in it.
1347  * The caller should call putback_movable_pages() to return pages to the LRU
1348  * or free list only if ret != 0.
1349  *
1350  * Returns the number of pages that were not migrated, or an error code.
1351  */
1352 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1353                 free_page_t put_new_page, unsigned long private,
1354                 enum migrate_mode mode, int reason)
1355 {
1356         int retry = 1;
1357         int nr_failed = 0;
1358         int nr_succeeded = 0;
1359         int pass = 0;
1360         struct page *page;
1361         struct page *page2;
1362         int swapwrite = current->flags & PF_SWAPWRITE;
1363         int rc;
1364
1365         if (!swapwrite)
1366                 current->flags |= PF_SWAPWRITE;
1367
1368         for(pass = 0; pass < 10 && retry; pass++) {
1369                 retry = 0;
1370
1371                 list_for_each_entry_safe(page, page2, from, lru) {
1372 retry:
1373                         cond_resched();
1374
1375                         if (PageHuge(page))
1376                                 rc = unmap_and_move_huge_page(get_new_page,
1377                                                 put_new_page, private, page,
1378                                                 pass > 2, mode, reason);
1379                         else
1380                                 rc = unmap_and_move(get_new_page, put_new_page,
1381                                                 private, page, pass > 2, mode,
1382                                                 reason);
1383
1384                         switch(rc) {
1385                         case -ENOMEM:
1386                                 /*
1387                                  * THP migration might be unsupported or the
1388                                  * allocation could've failed so we should
1389                                  * retry on the same page with the THP split
1390                                  * to base pages.
1391                                  *
1392                                  * Head page is retried immediately and tail
1393                                  * pages are added to the tail of the list so
1394                                  * we encounter them after the rest of the list
1395                                  * is processed.
1396                                  */
1397                                 if (PageTransHuge(page) && !PageHuge(page)) {
1398                                         lock_page(page);
1399                                         rc = split_huge_page_to_list(page, from);
1400                                         unlock_page(page);
1401                                         if (!rc) {
1402                                                 list_safe_reset_next(page, page2, lru);
1403                                                 goto retry;
1404                                         }
1405                                 }
1406                                 nr_failed++;
1407                                 goto out;
1408                         case -EAGAIN:
1409                                 retry++;
1410                                 break;
1411                         case MIGRATEPAGE_SUCCESS:
1412                                 nr_succeeded++;
1413                                 break;
1414                         default:
1415                                 /*
1416                                  * Permanent failure (-EBUSY, -ENOSYS, etc.):
1417                                  * unlike -EAGAIN case, the failed page is
1418                                  * removed from migration page list and not
1419                                  * retried in the next outer loop.
1420                                  */
1421                                 nr_failed++;
1422                                 break;
1423                         }
1424                 }
1425         }
1426         nr_failed += retry;
1427         rc = nr_failed;
1428 out:
1429         if (nr_succeeded)
1430                 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1431         if (nr_failed)
1432                 count_vm_events(PGMIGRATE_FAIL, nr_failed);
1433         trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
1434
1435         if (!swapwrite)
1436                 current->flags &= ~PF_SWAPWRITE;
1437
1438         return rc;
1439 }
1440
1441 #ifdef CONFIG_NUMA
1442
1443 static int store_status(int __user *status, int start, int value, int nr)
1444 {
1445         while (nr-- > 0) {
1446                 if (put_user(value, status + start))
1447                         return -EFAULT;
1448                 start++;
1449         }
1450
1451         return 0;
1452 }
1453
1454 static int do_move_pages_to_node(struct mm_struct *mm,
1455                 struct list_head *pagelist, int node)
1456 {
1457         int err;
1458
1459         if (list_empty(pagelist))
1460                 return 0;
1461
1462         err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
1463                         MIGRATE_SYNC, MR_SYSCALL);
1464         if (err)
1465                 putback_movable_pages(pagelist);
1466         return err;
1467 }
1468
1469 /*
1470  * Resolves the given address to a struct page, isolates it from the LRU and
1471  * puts it on the given pagelist.
1472  * Returns -errno if the page cannot be found/isolated or 0 when it has been
1473  * queued or the page doesn't need to be migrated because it is already on
1474  * the target node.
1475  */
1476 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1477                 int node, struct list_head *pagelist, bool migrate_all)
1478 {
1479         struct vm_area_struct *vma;
1480         struct page *page;
1481         unsigned int follflags;
1482         int err;
1483
1484         down_read(&mm->mmap_sem);
1485         err = -EFAULT;
1486         vma = find_vma(mm, addr);
1487         if (!vma || addr < vma->vm_start || !vma_migratable(vma))
1488                 goto out;
1489
1490         /* FOLL_DUMP to ignore special (like zero) pages */
1491         follflags = FOLL_GET | FOLL_DUMP;
1492         page = follow_page(vma, addr, follflags);
1493
1494         err = PTR_ERR(page);
1495         if (IS_ERR(page))
1496                 goto out;
1497
1498         err = -ENOENT;
1499         if (!page)
1500                 goto out;
1501
1502         err = 0;
1503         if (page_to_nid(page) == node)
1504                 goto out_putpage;
1505
1506         err = -EACCES;
1507         if (page_mapcount(page) > 1 && !migrate_all)
1508                 goto out_putpage;
1509
1510         if (PageHuge(page)) {
1511                 if (PageHead(page)) {
1512                         isolate_huge_page(page, pagelist);
1513                         err = 0;
1514                 }
1515         } else {
1516                 struct page *head;
1517
1518                 head = compound_head(page);
1519                 err = isolate_lru_page(head);
1520                 if (err)
1521                         goto out_putpage;
1522
1523                 err = 0;
1524                 list_add_tail(&head->lru, pagelist);
1525                 mod_node_page_state(page_pgdat(head),
1526                         NR_ISOLATED_ANON + page_is_file_cache(head),
1527                         hpage_nr_pages(head));
1528         }
1529 out_putpage:
1530         /*
1531          * Either remove the duplicate refcount from
1532          * isolate_lru_page() or drop the page ref if it was
1533          * not isolated.
1534          */
1535         put_page(page);
1536 out:
1537         up_read(&mm->mmap_sem);
1538         return err;
1539 }
1540
1541 /*
1542  * Migrate an array of page addresses onto an array of nodes and fill
1543  * in the corresponding array of status values.
1544  */
1545 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1546                          unsigned long nr_pages,
1547                          const void __user * __user *pages,
1548                          const int __user *nodes,
1549                          int __user *status, int flags)
1550 {
1551         int current_node = NUMA_NO_NODE;
1552         LIST_HEAD(pagelist);
1553         int start, i;
1554         int err = 0, err1;
1555
1556         migrate_prep();
1557
1558         for (i = start = 0; i < nr_pages; i++) {
1559                 const void __user *p;
1560                 unsigned long addr;
1561                 int node;
1562
1563                 err = -EFAULT;
1564                 if (get_user(p, pages + i))
1565                         goto out_flush;
1566                 if (get_user(node, nodes + i))
1567                         goto out_flush;
1568                 addr = (unsigned long)p;
1569
1570                 err = -ENODEV;
1571                 if (node < 0 || node >= MAX_NUMNODES)
1572                         goto out_flush;
1573                 if (!node_state(node, N_MEMORY))
1574                         goto out_flush;
1575
1576                 err = -EACCES;
1577                 if (!node_isset(node, task_nodes))
1578                         goto out_flush;
1579
1580                 if (current_node == NUMA_NO_NODE) {
1581                         current_node = node;
1582                         start = i;
1583                 } else if (node != current_node) {
1584                         err = do_move_pages_to_node(mm, &pagelist, current_node);
1585                         if (err)
1586                                 goto out;
1587                         err = store_status(status, start, current_node, i - start);
1588                         if (err)
1589                                 goto out;
1590                         start = i;
1591                         current_node = node;
1592                 }
1593
1594                 /*
1595                  * Errors in the page lookup or isolation are not fatal and we simply
1596                  * report them via status
1597                  */
1598                 err = add_page_for_migration(mm, addr, current_node,
1599                                 &pagelist, flags & MPOL_MF_MOVE_ALL);
1600                 if (!err)
1601                         continue;
1602
1603                 err = store_status(status, i, err, 1);
1604                 if (err)
1605                         goto out_flush;
1606
1607                 err = do_move_pages_to_node(mm, &pagelist, current_node);
1608                 if (err)
1609                         goto out;
1610                 if (i > start) {
1611                         err = store_status(status, start, current_node, i - start);
1612                         if (err)
1613                                 goto out;
1614                 }
1615                 current_node = NUMA_NO_NODE;
1616         }
1617 out_flush:
1618         if (list_empty(&pagelist))
1619                 return err;
1620
1621         /* Make sure we do not overwrite the existing error */
1622         err1 = do_move_pages_to_node(mm, &pagelist, current_node);
1623         if (!err1)
1624                 err1 = store_status(status, start, current_node, i - start);
1625         if (!err)
1626                 err = err1;
1627 out:
1628         return err;
1629 }
1630
1631 /*
1632  * Determine the nodes of an array of pages and store them in an array of status.
1633  */
1634 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1635                                 const void __user **pages, int *status)
1636 {
1637         unsigned long i;
1638
1639         down_read(&mm->mmap_sem);
1640
1641         for (i = 0; i < nr_pages; i++) {
1642                 unsigned long addr = (unsigned long)(*pages);
1643                 struct vm_area_struct *vma;
1644                 struct page *page;
1645                 int err = -EFAULT;
1646
1647                 vma = find_vma(mm, addr);
1648                 if (!vma || addr < vma->vm_start)
1649                         goto set_status;
1650
1651                 /* FOLL_DUMP to ignore special (like zero) pages */
1652                 page = follow_page(vma, addr, FOLL_DUMP);
1653
1654                 err = PTR_ERR(page);
1655                 if (IS_ERR(page))
1656                         goto set_status;
1657
1658                 err = page ? page_to_nid(page) : -ENOENT;
1659 set_status:
1660                 *status = err;
1661
1662                 pages++;
1663                 status++;
1664         }
1665
1666         up_read(&mm->mmap_sem);
1667 }
1668
1669 /*
1670  * Determine the nodes of a user array of pages and store them in
1671  * a user array of status.
1672  */
1673 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1674                          const void __user * __user *pages,
1675                          int __user *status)
1676 {
1677 #define DO_PAGES_STAT_CHUNK_NR 16
1678         const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1679         int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1680
1681         while (nr_pages) {
1682                 unsigned long chunk_nr;
1683
1684                 chunk_nr = nr_pages;
1685                 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1686                         chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1687
1688                 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1689                         break;
1690
1691                 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1692
1693                 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1694                         break;
1695
1696                 pages += chunk_nr;
1697                 status += chunk_nr;
1698                 nr_pages -= chunk_nr;
1699         }
1700         return nr_pages ? -EFAULT : 0;
1701 }
1702
1703 /*
1704  * Move a list of pages in the address space of the process identified by
1705  * @pid (or of the current process when @pid is 0).
1706  */
1707 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
1708                              const void __user * __user *pages,
1709                              const int __user *nodes,
1710                              int __user *status, int flags)
1711 {
1712         struct task_struct *task;
1713         struct mm_struct *mm;
1714         int err;
1715         nodemask_t task_nodes;
1716
1717         /* Check flags */
1718         if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1719                 return -EINVAL;
1720
1721         if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1722                 return -EPERM;
1723
1724         /* Find the mm_struct */
1725         rcu_read_lock();
1726         task = pid ? find_task_by_vpid(pid) : current;
1727         if (!task) {
1728                 rcu_read_unlock();
1729                 return -ESRCH;
1730         }
1731         get_task_struct(task);
1732
1733         /*
1734          * Check if this process has the right to modify the specified
1735          * process. Use the regular "ptrace_may_access()" checks.
1736          */
1737         if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1738                 rcu_read_unlock();
1739                 err = -EPERM;
1740                 goto out;
1741         }
1742         rcu_read_unlock();
1743
1744         err = security_task_movememory(task);
1745         if (err)
1746                 goto out;
1747
1748         task_nodes = cpuset_mems_allowed(task);
1749         mm = get_task_mm(task);
1750         put_task_struct(task);
1751
1752         if (!mm)
1753                 return -EINVAL;
1754
1755         if (nodes)
1756                 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1757                                     nodes, status, flags);
1758         else
1759                 err = do_pages_stat(mm, nr_pages, pages, status);
1760
1761         mmput(mm);
1762         return err;
1763
1764 out:
1765         put_task_struct(task);
1766         return err;
1767 }
1768
1769 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1770                 const void __user * __user *, pages,
1771                 const int __user *, nodes,
1772                 int __user *, status, int, flags)
1773 {
1774         return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1775 }
1776
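/*
 * Illustrative userspace sketch (not part of this file): driving the
 * move_pages(2) syscall defined above through the libnuma wrapper in
 * <numaif.h>. The target node (1) and the single test page are arbitrary
 * example values, and error handling is reduced to a perror().
 *
 *	#include <numaif.h>		// move_pages(), MPOL_MF_MOVE
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long psz = sysconf(_SC_PAGESIZE);
 *		void *pages[1];
 *		int nodes[1] = { 1 };	// requested destination node
 *		int status[1];
 *
 *		// one page-aligned, faulted-in page to migrate
 *		pages[0] = aligned_alloc(psz, psz);
 *		((char *)pages[0])[0] = 1;
 *
 *		// pid 0 means the calling process; nodes != NULL asks for a move
 *		if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE))
 *			perror("move_pages");
 *
 *		// nodes == NULL only queries the current node of each page,
 *		// which ends up in do_pages_stat() above
 *		move_pages(0, 1, pages, NULL, status, 0);
 *		printf("page is on node (or -errno): %d\n", status[0]);
 *		return 0;
 *	}
 *
 * Build with -lnuma. Each status[i] receives either the node the page
 * ended up on or a negative errno, exactly as store_status() and
 * do_pages_stat_array() fill it in.
 */
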
1777 #ifdef CONFIG_COMPAT
1778 COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
1779                        compat_uptr_t __user *, pages32,
1780                        const int __user *, nodes,
1781                        int __user *, status,
1782                        int, flags)
1783 {
1784         const void __user * __user *pages;
1785         int i;
1786
1787         pages = compat_alloc_user_space(nr_pages * sizeof(void *));
1788         for (i = 0; i < nr_pages; i++) {
1789                 compat_uptr_t p;
1790
1791                 if (get_user(p, pages32 + i) ||
1792                         put_user(compat_ptr(p), pages + i))
1793                         return -EFAULT;
1794         }
1795         return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1796 }
1797 #endif /* CONFIG_COMPAT */
1798
1799 #ifdef CONFIG_NUMA_BALANCING
1800 /*
1801  * Returns true if this is a safe migration target node for misplaced NUMA
1802  * pages. Currently it only checks the watermarks, which is crude.
1803  */
1804 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
1805                                    unsigned long nr_migrate_pages)
1806 {
1807         int z;
1808
1809         for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1810                 struct zone *zone = pgdat->node_zones + z;
1811
1812                 if (!populated_zone(zone))
1813                         continue;
1814
1815                 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
1816                 if (!zone_watermark_ok(zone, 0,
1817                                        high_wmark_pages(zone) +
1818                                        nr_migrate_pages,
1819                                        0, 0))
1820                         continue;
1821                 return true;
1822         }
1823         return false;
1824 }
1825
1826 static struct page *alloc_misplaced_dst_page(struct page *page,
1827                                            unsigned long data)
1828 {
1829         int nid = (int) data;
1830         struct page *newpage;
1831
1832         newpage = __alloc_pages_node(nid,
1833                                          (GFP_HIGHUSER_MOVABLE |
1834                                           __GFP_THISNODE | __GFP_NOMEMALLOC |
1835                                           __GFP_NORETRY | __GFP_NOWARN) &
1836                                          ~__GFP_RECLAIM, 0);
1837
1838         return newpage;
1839 }
1840
1841 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
1842 {
1843         int page_lru;
1844
1845         VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
1846
1847         /* Avoid migrating to a node that is nearly full */
1848         if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
1849                 return 0;
1850
1851         if (isolate_lru_page(page))
1852                 return 0;
1853
1854         /*
1855          * migrate_misplaced_transhuge_page() skips page migration's usual
1856          * check on page_count(), so we must do it here, now that the page
1857          * has been isolated: a GUP pin, or any other pin, prevents migration.
1858          * The expected page count is 3: one for the page's mapcount, one for
1859          * the caller's pin and one for the reference taken by isolate_lru_page().
1860          */
1861         if (PageTransHuge(page) && page_count(page) != 3) {
1862                 putback_lru_page(page);
1863                 return 0;
1864         }
1865
1866         page_lru = page_is_file_cache(page);
1867         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
1868                                 hpage_nr_pages(page));
1869
1870         /*
1871          * Isolating the page has taken another reference, so the
1872          * caller's reference can be safely dropped without the page
1873          * disappearing underneath us during migration.
1874          */
1875         put_page(page);
1876         return 1;
1877 }
1878
1879 bool pmd_trans_migrating(pmd_t pmd)
1880 {
1881         struct page *page = pmd_page(pmd);
1882         return PageLocked(page);
1883 }
1884
1885 /*
1886  * Attempt to migrate a misplaced page to the specified destination
1887  * node. Caller is expected to have an elevated reference count on
1888  * the page that will be dropped by this function before returning.
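 *
 * A rough caller-side sketch (the NUMA hinting fault path in mm/memory.c
 * follows this pattern; the variable names here are illustrative only):
 *
 *	// page was looked up and get_page()'d by the fault handler,
 *	// target_nid was chosen by the NUMA placement policy
 *	migrated = migrate_misplaced_page(page, vma, target_nid);
 *	if (migrated)
 *		page_nid = target_nid;	// account the fault on the new node
 *	// whether it migrated or not, our page reference is gone now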
1889  */
1890 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1891                            int node)
1892 {
1893         pg_data_t *pgdat = NODE_DATA(node);
1894         int isolated;
1895         int nr_remaining;
1896         LIST_HEAD(migratepages);
1897
1898         /*
1899          * Don't migrate file pages that are mapped in multiple processes
1900          * with execute permissions as they are probably shared libraries.
1901          */
1902         if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1903             (vma->vm_flags & VM_EXEC))
1904                 goto out;
1905
1906         /*
1907          * Also do not migrate dirty pages as not all filesystems can move
1908          * dirty pages in MIGRATE_ASYNC mode, which is a waste of cycles.
1909          */
1910         if (page_is_file_cache(page) && PageDirty(page))
1911                 goto out;
1912
1913         isolated = numamigrate_isolate_page(pgdat, page);
1914         if (!isolated)
1915                 goto out;
1916
1917         list_add(&page->lru, &migratepages);
1918         nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
1919                                      NULL, node, MIGRATE_ASYNC,
1920                                      MR_NUMA_MISPLACED);
1921         if (nr_remaining) {
1922                 if (!list_empty(&migratepages)) {
1923                         list_del(&page->lru);
1924                         dec_node_page_state(page, NR_ISOLATED_ANON +
1925                                         page_is_file_cache(page));
1926                         putback_lru_page(page);
1927                 }
1928                 isolated = 0;
1929         } else
1930                 count_vm_numa_event(NUMA_PAGE_MIGRATE);
1931         BUG_ON(!list_empty(&migratepages));
1932         return isolated;
1933
1934 out:
1935         put_page(page);
1936         return 0;
1937 }
1938 #endif /* CONFIG_NUMA_BALANCING */
1939
1940 #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1941 /*
1942  * Migrates a THP to a given target node. page must be locked and is unlocked
1943  * before returning.
1944  */
1945 int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1946                                 struct vm_area_struct *vma,
1947                                 pmd_t *pmd, pmd_t entry,
1948                                 unsigned long address,
1949                                 struct page *page, int node)
1950 {
1951         spinlock_t *ptl;
1952         pg_data_t *pgdat = NODE_DATA(node);
1953         int isolated = 0;
1954         struct page *new_page = NULL;
1955         int page_lru = page_is_file_cache(page);
1956         unsigned long start = address & HPAGE_PMD_MASK;
1957
1958         new_page = alloc_pages_node(node,
1959                 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
1960                 HPAGE_PMD_ORDER);
1961         if (!new_page)
1962                 goto out_fail;
1963         prep_transhuge_page(new_page);
1964
1965         isolated = numamigrate_isolate_page(pgdat, page);
1966         if (!isolated) {
1967                 put_page(new_page);
1968                 goto out_fail;
1969         }
1970
1971         /* Prepare a page as a migration target */
1972         __SetPageLocked(new_page);
1973         if (PageSwapBacked(page))
1974                 __SetPageSwapBacked(new_page);
1975
1976         /* anon mapping, we can simply copy page->mapping to the new page: */
1977         new_page->mapping = page->mapping;
1978         new_page->index = page->index;
1979         /* flush the cache before copying using the kernel virtual address */
1980         flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
1981         migrate_page_copy(new_page, page);
1982         WARN_ON(PageLRU(new_page));
1983
1984         /* Recheck the target PMD */
1985         ptl = pmd_lock(mm, pmd);
1986         if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
1987                 spin_unlock(ptl);
1988
1989                 /* Reverse changes made by migrate_page_copy() */
1990                 if (TestClearPageActive(new_page))
1991                         SetPageActive(page);
1992                 if (TestClearPageUnevictable(new_page))
1993                         SetPageUnevictable(page);
1994
1995                 unlock_page(new_page);
1996                 put_page(new_page);             /* Free it */
1997
1998                 /* Retake the caller's reference and put the page back on the LRU */
1999                 get_page(page);
2000                 putback_lru_page(page);
2001                 mod_node_page_state(page_pgdat(page),
2002                          NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
2003
2004                 goto out_unlock;
2005         }
2006
2007         entry = mk_huge_pmd(new_page, vma->vm_page_prot);
2008         entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2009
2010         /*
2011          * Overwrite the old entry under pagetable lock and establish
2012          * the new PTE. Any parallel GUP will either observe the old
2013          * page blocking on the page lock, block on the page table
2014          * lock or observe the new page. The SetPageUptodate on the
2015          * new page and page_add_new_anon_rmap guarantee the copy is
2016          * visible before the pagetable update.
2017          */
2018         page_add_anon_rmap(new_page, vma, start, true);
2019         /*
2020          * At this point the pmd is numa/protnone (i.e. non present) and the TLB
2021          * has already been flushed globally.  So no TLB can be currently
2022          * caching this non present pmd mapping.  There's no need to clear the
2023          * pmd before doing set_pmd_at(), nor to flush the TLB after
2024          * set_pmd_at().  Clearing the pmd here would introduce a race
2025          * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
2026          * mmap_sem for reading.  If the pmd is set to NULL at any given time,
2027          * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
2028          * pmd.
2029          */
2030         set_pmd_at(mm, start, pmd, entry);
2031         update_mmu_cache_pmd(vma, address, &entry);
2032
2033         page_ref_unfreeze(page, 2);
2034         mlock_migrate_page(new_page, page);
2035         page_remove_rmap(page, true);
2036         set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
2037
2038         spin_unlock(ptl);
2039
2040         /* Take an "isolate" reference and put new page on the LRU. */
2041         get_page(new_page);
2042         putback_lru_page(new_page);
2043
2044         unlock_page(new_page);
2045         unlock_page(page);
2046         put_page(page);                 /* Drop the rmap reference */
2047         put_page(page);                 /* Drop the LRU isolation reference */
2048
2049         count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
2050         count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
2051
2052         mod_node_page_state(page_pgdat(page),
2053                         NR_ISOLATED_ANON + page_lru,
2054                         -HPAGE_PMD_NR);
2055         return isolated;
2056
2057 out_fail:
2058         count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
2059         ptl = pmd_lock(mm, pmd);
2060         if (pmd_same(*pmd, entry)) {
2061                 entry = pmd_modify(entry, vma->vm_page_prot);
2062                 set_pmd_at(mm, start, pmd, entry);
2063                 update_mmu_cache_pmd(vma, address, &entry);
2064         }
2065         spin_unlock(ptl);
2066
2067 out_unlock:
2068         unlock_page(page);
2069         put_page(page);
2070         return 0;
2071 }
2072 #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
2073
2074 #endif /* CONFIG_NUMA */
2075
2076 #if defined(CONFIG_MIGRATE_VMA_HELPER)
2077 struct migrate_vma {
2078         struct vm_area_struct   *vma;
2079         unsigned long           *dst;
2080         unsigned long           *src;
2081         unsigned long           cpages;
2082         unsigned long           npages;
2083         unsigned long           start;
2084         unsigned long           end;
2085 };
2086
2087 static int migrate_vma_collect_hole(unsigned long start,
2088                                     unsigned long end,
2089                                     struct mm_walk *walk)
2090 {
2091         struct migrate_vma *migrate = walk->private;
2092         unsigned long addr;
2093
2094         for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
2095                 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2096                 migrate->dst[migrate->npages] = 0;
2097                 migrate->npages++;
2098                 migrate->cpages++;
2099         }
2100
2101         return 0;
2102 }
2103
2104 static int migrate_vma_collect_skip(unsigned long start,
2105                                     unsigned long end,
2106                                     struct mm_walk *walk)
2107 {
2108         struct migrate_vma *migrate = walk->private;
2109         unsigned long addr;
2110
2111         for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
2112                 migrate->dst[migrate->npages] = 0;
2113                 migrate->src[migrate->npages++] = 0;
2114         }
2115
2116         return 0;
2117 }
2118
2119 static int migrate_vma_collect_pmd(pmd_t *pmdp,
2120                                    unsigned long start,
2121                                    unsigned long end,
2122                                    struct mm_walk *walk)
2123 {
2124         struct migrate_vma *migrate = walk->private;
2125         struct vm_area_struct *vma = walk->vma;
2126         struct mm_struct *mm = vma->vm_mm;
2127         unsigned long addr = start, unmapped = 0;
2128         spinlock_t *ptl;
2129         pte_t *ptep;
2130
2131 again:
2132         if (pmd_none(*pmdp))
2133                 return migrate_vma_collect_hole(start, end, walk);
2134
2135         if (pmd_trans_huge(*pmdp)) {
2136                 struct page *page;
2137
2138                 ptl = pmd_lock(mm, pmdp);
2139                 if (unlikely(!pmd_trans_huge(*pmdp))) {
2140                         spin_unlock(ptl);
2141                         goto again;
2142                 }
2143
2144                 page = pmd_page(*pmdp);
2145                 if (is_huge_zero_page(page)) {
2146                         spin_unlock(ptl);
2147                         split_huge_pmd(vma, pmdp, addr);
2148                         if (pmd_trans_unstable(pmdp))
2149                                 return migrate_vma_collect_skip(start, end,
2150                                                                 walk);
2151                 } else {
2152                         int ret;
2153
2154                         get_page(page);
2155                         spin_unlock(ptl);
2156                         if (unlikely(!trylock_page(page)))
2157                                 return migrate_vma_collect_skip(start, end,
2158                                                                 walk);
2159                         ret = split_huge_page(page);
2160                         unlock_page(page);
2161                         put_page(page);
2162                         if (ret)
2163                                 return migrate_vma_collect_skip(start, end,
2164                                                                 walk);
2165                         if (pmd_none(*pmdp))
2166                                 return migrate_vma_collect_hole(start, end,
2167                                                                 walk);
2168                 }
2169         }
2170
2171         if (unlikely(pmd_bad(*pmdp)))
2172                 return migrate_vma_collect_skip(start, end, walk);
2173
2174         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2175         arch_enter_lazy_mmu_mode();
2176
2177         for (; addr < end; addr += PAGE_SIZE, ptep++) {
2178                 unsigned long mpfn, pfn;
2179                 struct page *page;
2180                 swp_entry_t entry;
2181                 pte_t pte;
2182
2183                 pte = *ptep;
2184                 pfn = pte_pfn(pte);
2185
2186                 if (pte_none(pte)) {
2187                         mpfn = MIGRATE_PFN_MIGRATE;
2188                         migrate->cpages++;
2189                         pfn = 0;
2190                         goto next;
2191                 }
2192
2193                 if (!pte_present(pte)) {
2194                         mpfn = pfn = 0;
2195
2196                         /*
2197                          * Only care about unaddressable device page special
2198                          * page table entries. Other special swap entries are not
2199                          * migratable, and we ignore regular swapped pages.
2200                          */
2201                         entry = pte_to_swp_entry(pte);
2202                         if (!is_device_private_entry(entry))
2203                                 goto next;
2204
2205                         page = device_private_entry_to_page(entry);
2206                         mpfn = migrate_pfn(page_to_pfn(page)) |
2207                                 MIGRATE_PFN_DEVICE | MIGRATE_PFN_MIGRATE;
2208                         if (is_write_device_private_entry(entry))
2209                                 mpfn |= MIGRATE_PFN_WRITE;
2210                 } else {
2211                         if (is_zero_pfn(pfn)) {
2212                                 mpfn = MIGRATE_PFN_MIGRATE;
2213                                 migrate->cpages++;
2214                                 pfn = 0;
2215                                 goto next;
2216                         }
2217                         page = _vm_normal_page(migrate->vma, addr, pte, true);
2218                         mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2219                         mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2220                 }
2221
2222                 /* FIXME support THP */
2223                 if (!page || !page->mapping || PageTransCompound(page)) {
2224                         mpfn = pfn = 0;
2225                         goto next;
2226                 }
2227                 pfn = page_to_pfn(page);
2228
2229                 /*
2230                  * By getting a reference on the page we pin it and that blocks
2231                  * any kind of migration. Side effect is that it "freezes" the
2232                  * pte.
2233                  *
2234                  * We drop this reference after isolating the page from the lru
2235                  * for non-device pages (device pages are not on the lru and thus
2236                  * can't be dropped from it).
2237                  */
2238                 get_page(page);
2239                 migrate->cpages++;
2240
2241                 /*
2242                  * Optimize for the common case where page is only mapped once
2243                  * in one process. If we can lock the page, then we can safely
2244                  * set up a special migration page table entry now.
2245                  */
2246                 if (trylock_page(page)) {
2247                         pte_t swp_pte;
2248
2249                         mpfn |= MIGRATE_PFN_LOCKED;
2250                         ptep_get_and_clear(mm, addr, ptep);
2251
2252                         /* Setup special migration page table entry */
2253                         entry = make_migration_entry(page, mpfn &
2254                                                      MIGRATE_PFN_WRITE);
2255                         swp_pte = swp_entry_to_pte(entry);
2256                         if (pte_soft_dirty(pte))
2257                                 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2258                         set_pte_at(mm, addr, ptep, swp_pte);
2259
2260                         /*
2261                          * This is like regular unmap: we remove the rmap and
2262                          * drop page refcount. Page won't be freed, as we took
2263                          * a reference just above.
2264                          */
2265                         page_remove_rmap(page, false);
2266                         put_page(page);
2267
2268                         if (pte_present(pte))
2269                                 unmapped++;
2270                 }
2271
2272 next:
2273                 migrate->dst[migrate->npages] = 0;
2274                 migrate->src[migrate->npages++] = mpfn;
2275         }
2276         arch_leave_lazy_mmu_mode();
2277         pte_unmap_unlock(ptep - 1, ptl);
2278
2279         /* Only flush the TLB if we actually modified any entries */
2280         if (unmapped)
2281                 flush_tlb_range(walk->vma, start, end);
2282
2283         return 0;
2284 }
2285
2286 /*
2287  * migrate_vma_collect() - collect pages over a range of virtual addresses
2288  * @migrate: migrate struct containing all migration information
2289  *
2290  * This will walk the CPU page table. For each virtual address backed by a
2291  * valid page, it updates the src array and takes a reference on the page, in
2292  * order to pin the page until we lock it and unmap it.
2293  */
2294 static void migrate_vma_collect(struct migrate_vma *migrate)
2295 {
2296         struct mmu_notifier_range range;
2297         struct mm_walk mm_walk;
2298
2299         mm_walk.pmd_entry = migrate_vma_collect_pmd;
2300         mm_walk.pte_entry = NULL;
2301         mm_walk.pte_hole = migrate_vma_collect_hole;
2302         mm_walk.hugetlb_entry = NULL;
2303         mm_walk.test_walk = NULL;
2304         mm_walk.vma = migrate->vma;
2305         mm_walk.mm = migrate->vma->vm_mm;
2306         mm_walk.private = migrate;
2307
2308         mmu_notifier_range_init(&range, mm_walk.mm, migrate->start,
2309                                 migrate->end);
2310         mmu_notifier_invalidate_range_start(&range);
2311         walk_page_range(migrate->start, migrate->end, &mm_walk);
2312         mmu_notifier_invalidate_range_end(&range);
2313
2314         migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2315 }
2316
2317 /*
2318  * migrate_vma_check_page() - check if page is pinned or not
2319  * @page: struct page to check
2320  *
2321  * Pinned pages cannot be migrated. This is the same test as in
2322  * migrate_page_move_mapping(), except that here we allow migration of a
2323  * ZONE_DEVICE page.
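 *
 * As a worked example of the accounting below: an anonymous page mapped
 * in exactly one process, plus the extra reference its caller took when
 * isolating it, has page_count() == 2 and page_mapcount() == 1. With
 * extra == 1 the check "page_count() - extra > page_mapcount()" becomes
 * 2 - 1 > 1, which is false, so the page is considered migratable; any
 * additional pin (a GUP reference, for instance) tips the comparison
 * and the page is treated as pinned.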
2324  */
2325 static bool migrate_vma_check_page(struct page *page)
2326 {
2327         /*
2328          * One extra ref because caller holds an extra reference, either from
2329          * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2330          * a device page.
2331          */
2332         int extra = 1;
2333
2334         /*
2335          * FIXME support THP (transparent huge page), it is a bit more complex to
2336          * check them than regular pages, because they can be mapped with a pmd
2337          * or with a pte (split pte mapping).
2338          */
2339         if (PageCompound(page))
2340                 return false;
2341
2342         /* Pages from ZONE_DEVICE have one extra reference */
2343         if (is_zone_device_page(page)) {
2344                 /*
2345                  * Private pages can never be pinned as they have no valid pte
2346                  * and GUP will fail for them. Yet if there is a pending
2347                  * migration, a thread might try to wait on the pte migration
2348                  * entry and will bump the page reference count. Sadly there is
2349                  * no way to differentiate a regular pin from a migration wait.
2350                  * Hence, to keep two racing threads, each trying to migrate
2351                  * back to the CPU, from looping forever (one stopping migration
2352                  * because the other waits on the pte migration entry), we always return true.
2353                  *
2354                  * FIXME proper solution is to rework migration_entry_wait() so
2355                  * it does not need to take a reference on page.
2356                  */
2357                 if (is_device_private_page(page))
2358                         return true;
2359
2360                 /*
2361                  * Only allow device public pages to be migrated and account for
2362                  * the extra reference count implied by ZONE_DEVICE pages.
2363                  */
2364                 if (!is_device_public_page(page))
2365                         return false;
2366                 extra++;
2367         }
2368
2369         /* For file-backed pages */
2370         if (page_mapping(page))
2371                 extra += 1 + page_has_private(page);
2372
2373         if ((page_count(page) - extra) > page_mapcount(page))
2374                 return false;
2375
2376         return true;
2377 }
2378
2379 /*
2380  * migrate_vma_prepare() - lock pages and isolate them from the lru
2381  * @migrate: migrate struct containing all migration information
2382  *
2383  * This locks pages that have been collected by migrate_vma_collect(). Once each
2384  * page is locked it is isolated from the lru (for non-device pages). Finally,
2385  * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
2386  * migrated by concurrent kernel threads.
2387  */
2388 static void migrate_vma_prepare(struct migrate_vma *migrate)
2389 {
2390         const unsigned long npages = migrate->npages;
2391         const unsigned long start = migrate->start;
2392         unsigned long addr, i, restore = 0;
2393         bool allow_drain = true;
2394
2395         lru_add_drain();
2396
2397         for (i = 0; (i < npages) && migrate->cpages; i++) {
2398                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2399                 bool remap = true;
2400
2401                 if (!page)
2402                         continue;
2403
2404                 if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
2405                         /*
2406                          * Because we are migrating several pages there can be
2407                          * a deadlock between two concurrent migrations where
2408                          * each is waiting on the other's page lock.
2409                          *
2410                          * Make migrate_vma() a best-effort thing and back off
2411                          * for any page we cannot lock right away.
2412                          */
2413                         if (!trylock_page(page)) {
2414                                 migrate->src[i] = 0;
2415                                 migrate->cpages--;
2416                                 put_page(page);
2417                                 continue;
2418                         }
2419                         remap = false;
2420                         migrate->src[i] |= MIGRATE_PFN_LOCKED;
2421                 }
2422
2423                 /* ZONE_DEVICE pages are not on LRU */
2424                 if (!is_zone_device_page(page)) {
2425                         if (!PageLRU(page) && allow_drain) {
2426                                 /* Drain CPU's pagevec */
2427                                 lru_add_drain_all();
2428                                 allow_drain = false;
2429                         }
2430
2431                         if (isolate_lru_page(page)) {
2432                                 if (remap) {
2433                                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2434                                         migrate->cpages--;
2435                                         restore++;
2436                                 } else {
2437                                         migrate->src[i] = 0;
2438                                         unlock_page(page);
2439                                         migrate->cpages--;
2440                                         put_page(page);
2441                                 }
2442                                 continue;
2443                         }
2444
2445                         /* Drop the reference we took in collect */
2446                         put_page(page);
2447                 }
2448
2449                 if (!migrate_vma_check_page(page)) {
2450                         if (remap) {
2451                                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2452                                 migrate->cpages--;
2453                                 restore++;
2454
2455                                 if (!is_zone_device_page(page)) {
2456                                         get_page(page);
2457                                         putback_lru_page(page);
2458                                 }
2459                         } else {
2460                                 migrate->src[i] = 0;
2461                                 unlock_page(page);
2462                                 migrate->cpages--;
2463
2464                                 if (!is_zone_device_page(page))
2465                                         putback_lru_page(page);
2466                                 else
2467                                         put_page(page);
2468                         }
2469                 }
2470         }
2471
2472         for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
2473                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2474
2475                 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2476                         continue;
2477
2478                 remove_migration_pte(page, migrate->vma, addr, page);
2479
2480                 migrate->src[i] = 0;
2481                 unlock_page(page);
2482                 put_page(page);
2483                 restore--;
2484         }
2485 }
2486
2487 /*
2488  * migrate_vma_unmap() - replace page mapping with special migration pte entry
2489  * @migrate: migrate struct containing all migration information
2490  *
2491  * Replace page mapping (CPU page table pte) with a special migration pte entry
2492  * and check again if it has been pinned. Pinned pages are restored because we
2493  * cannot migrate them.
2494  *
2495  * This is the last step before we call the device driver callback to allocate
2496  * destination memory and copy contents of original page over to new page.
2497  */
2498 static void migrate_vma_unmap(struct migrate_vma *migrate)
2499 {
2500         int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
2501         const unsigned long npages = migrate->npages;
2502         const unsigned long start = migrate->start;
2503         unsigned long addr, i, restore = 0;
2504
2505         for (i = 0; i < npages; i++) {
2506                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2507
2508                 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2509                         continue;
2510
2511                 if (page_mapped(page)) {
2512                         try_to_unmap(page, flags);
2513                         if (page_mapped(page))
2514                                 goto restore;
2515                 }
2516
2517                 if (migrate_vma_check_page(page))
2518                         continue;
2519
2520 restore:
2521                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2522                 migrate->cpages--;
2523                 restore++;
2524         }
2525
2526         for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
2527                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2528
2529                 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2530                         continue;
2531
2532                 remove_migration_ptes(page, page, false);
2533
2534                 migrate->src[i] = 0;
2535                 unlock_page(page);
2536                 restore--;
2537
2538                 if (is_zone_device_page(page))
2539                         put_page(page);
2540                 else
2541                         putback_lru_page(page);
2542         }
2543 }
2544
2545 static void migrate_vma_insert_page(struct migrate_vma *migrate,
2546                                     unsigned long addr,
2547                                     struct page *page,
2548                                     unsigned long *src,
2549                                     unsigned long *dst)
2550 {
2551         struct vm_area_struct *vma = migrate->vma;
2552         struct mm_struct *mm = vma->vm_mm;
2553         struct mem_cgroup *memcg;
2554         bool flush = false;
2555         spinlock_t *ptl;
2556         pte_t entry;
2557         pgd_t *pgdp;
2558         p4d_t *p4dp;
2559         pud_t *pudp;
2560         pmd_t *pmdp;
2561         pte_t *ptep;
2562
2563         /* Only allow populating anonymous memory */
2564         if (!vma_is_anonymous(vma))
2565                 goto abort;
2566
2567         pgdp = pgd_offset(mm, addr);
2568         p4dp = p4d_alloc(mm, pgdp, addr);
2569         if (!p4dp)
2570                 goto abort;
2571         pudp = pud_alloc(mm, p4dp, addr);
2572         if (!pudp)
2573                 goto abort;
2574         pmdp = pmd_alloc(mm, pudp, addr);
2575         if (!pmdp)
2576                 goto abort;
2577
2578         if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2579                 goto abort;
2580
2581         /*
2582          * Use pte_alloc() instead of pte_alloc_map().  We can't run
2583          * pte_offset_map() on pmds where a huge pmd might be created
2584          * from a different thread.
2585          *
2586          * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
2587          * parallel threads are excluded by other means.
2588          *
2589          * Here we only have down_read(mmap_sem).
2590          */
2591         if (pte_alloc(mm, pmdp, addr))
2592                 goto abort;
2593
2594         /* See the comment in pte_alloc_one_map() */
2595         if (unlikely(pmd_trans_unstable(pmdp)))
2596                 goto abort;
2597
2598         if (unlikely(anon_vma_prepare(vma)))
2599                 goto abort;
2600         if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
2601                 goto abort;
2602
2603         /*
2604          * The memory barrier inside __SetPageUptodate makes sure that
2605          * preceding stores to the page contents become visible before
2606          * the set_pte_at() write.
2607          */
2608         __SetPageUptodate(page);
2609
2610         if (is_zone_device_page(page)) {
2611                 if (is_device_private_page(page)) {
2612                         swp_entry_t swp_entry;
2613
2614                         swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
2615                         entry = swp_entry_to_pte(swp_entry);
2616                 } else if (is_device_public_page(page)) {
2617                         entry = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
2618                         if (vma->vm_flags & VM_WRITE)
2619                                 entry = pte_mkwrite(pte_mkdirty(entry));
2620                         entry = pte_mkdevmap(entry);
2621                 }
2622         } else {
2623                 entry = mk_pte(page, vma->vm_page_prot);
2624                 if (vma->vm_flags & VM_WRITE)
2625                         entry = pte_mkwrite(pte_mkdirty(entry));
2626         }
2627
2628         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2629
2630         if (pte_present(*ptep)) {
2631                 unsigned long pfn = pte_pfn(*ptep);
2632
2633                 if (!is_zero_pfn(pfn)) {
2634                         pte_unmap_unlock(ptep, ptl);
2635                         mem_cgroup_cancel_charge(page, memcg, false);
2636                         goto abort;
2637                 }
2638                 flush = true;
2639         } else if (!pte_none(*ptep)) {
2640                 pte_unmap_unlock(ptep, ptl);
2641                 mem_cgroup_cancel_charge(page, memcg, false);
2642                 goto abort;
2643         }
2644
2645         /*
2646          * Check for userfaultfd but do not deliver the fault. Instead,
2647          * just back off.
2648          */
2649         if (userfaultfd_missing(vma)) {
2650                 pte_unmap_unlock(ptep, ptl);
2651                 mem_cgroup_cancel_charge(page, memcg, false);
2652                 goto abort;
2653         }
2654
2655         inc_mm_counter(mm, MM_ANONPAGES);
2656         page_add_new_anon_rmap(page, vma, addr, false);
2657         mem_cgroup_commit_charge(page, memcg, false, false);
2658         if (!is_zone_device_page(page))
2659                 lru_cache_add_active_or_unevictable(page, vma);
2660         get_page(page);
2661
2662         if (flush) {
2663                 flush_cache_page(vma, addr, pte_pfn(*ptep));
2664                 ptep_clear_flush_notify(vma, addr, ptep);
2665                 set_pte_at_notify(mm, addr, ptep, entry);
2666                 update_mmu_cache(vma, addr, ptep);
2667         } else {
2668                 /* No need to invalidate - it was non-present before */
2669                 set_pte_at(mm, addr, ptep, entry);
2670                 update_mmu_cache(vma, addr, ptep);
2671         }
2672
2673         pte_unmap_unlock(ptep, ptl);
2674         *src = MIGRATE_PFN_MIGRATE;
2675         return;
2676
2677 abort:
2678         *src &= ~MIGRATE_PFN_MIGRATE;
2679 }
2680
2681 /*
2682  * migrate_vma_pages() - migrate meta-data from src page to dst page
2683  * @migrate: migrate struct containing all migration information
2684  *
2685  * This migrates struct page meta-data from source struct page to destination
2686  * struct page. This effectively finishes the migration from source page to the
2687  * destination page.
2688  */
2689 static void migrate_vma_pages(struct migrate_vma *migrate)
2690 {
2691         const unsigned long npages = migrate->npages;
2692         const unsigned long start = migrate->start;
2693         struct mmu_notifier_range range;
2694         unsigned long addr, i;
2695         bool notified = false;
2696
2697         for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
2698                 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2699                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2700                 struct address_space *mapping;
2701                 int r;
2702
2703                 if (!newpage) {
2704                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2705                         continue;
2706                 }
2707
2708                 if (!page) {
2709                         if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) {
2710                                 continue;
2711                         }
2712                         if (!notified) {
2713                                 notified = true;
2714
2715                                 mmu_notifier_range_init(&range,
2716                                                         migrate->vma->vm_mm,
2717                                                         addr, migrate->end);
2718                                 mmu_notifier_invalidate_range_start(&range);
2719                         }
2720                         migrate_vma_insert_page(migrate, addr, newpage,
2721                                                 &migrate->src[i],
2722                                                 &migrate->dst[i]);
2723                         continue;
2724                 }
2725
2726                 mapping = page_mapping(page);
2727
2728                 if (is_zone_device_page(newpage)) {
2729                         if (is_device_private_page(newpage)) {
2730                                 /*
2731                                  * For now only support private anonymous when
2732                                  * migrating to un-addressable device memory.
2733                                  */
2734                                 if (mapping) {
2735                                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2736                                         continue;
2737                                 }
2738                         } else if (!is_device_public_page(newpage)) {
2739                                 /*
2740                                  * Other types of ZONE_DEVICE page are not
2741                                  * supported.
2742                                  */
2743                                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2744                                 continue;
2745                         }
2746                 }
2747
2748                 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
2749                 if (r != MIGRATEPAGE_SUCCESS)
2750                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2751         }
2752
2753         /*
2754          * No need to double call mmu_notifier->invalidate_range() callback as
2755          * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
2756          * already called it.
2757          */
2758         if (notified)
2759                 mmu_notifier_invalidate_range_only_end(&range);
2760 }
2761
2762 /*
2763  * migrate_vma_finalize() - restore CPU page table entry
2764  * @migrate: migrate struct containing all migration information
2765  *
2766  * This replaces the special migration pte entry with either a mapping to the
2767  * new page if migration was successful for that page, or to the original page
2768  * otherwise.
2769  *
2770  * This also unlocks the pages and puts them back on the lru (or, for device
2771  * pages, drops the extra refcount).
2772  */
2773 static void migrate_vma_finalize(struct migrate_vma *migrate)
2774 {
2775         const unsigned long npages = migrate->npages;
2776         unsigned long i;
2777
2778         for (i = 0; i < npages; i++) {
2779                 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2780                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2781
2782                 if (!page) {
2783                         if (newpage) {
2784                                 unlock_page(newpage);
2785                                 put_page(newpage);
2786                         }
2787                         continue;
2788                 }
2789
2790                 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
2791                         if (newpage) {
2792                                 unlock_page(newpage);
2793                                 put_page(newpage);
2794                         }
2795                         newpage = page;
2796                 }
2797
2798                 remove_migration_ptes(page, newpage, false);
2799                 unlock_page(page);
2800                 migrate->cpages--;
2801
2802                 if (is_zone_device_page(page))
2803                         put_page(page);
2804                 else
2805                         putback_lru_page(page);
2806
2807                 if (newpage != page) {
2808                         unlock_page(newpage);
2809                         if (is_zone_device_page(newpage))
2810                                 put_page(newpage);
2811                         else
2812                                 putback_lru_page(newpage);
2813                 }
2814         }
2815 }
2816
2817 /*
2818  * migrate_vma() - migrate a range of memory inside vma
2819  *
2820  * @ops: migration callback for allocating destination memory and copying
2821  * @vma: virtual memory area containing the range to be migrated
2822  * @start: start address of the range to migrate (inclusive)
2823  * @end: end address of the range to migrate (exclusive)
2824  * @src: array of unsigned long containing source pfns (see migrate_pfn())
2825  * @dst: array of unsigned long containing destination pfns
2826  * @private: pointer passed back to each of the callback
2827  * Returns: 0 on success, error code otherwise
2828  *
2829  * This function tries to migrate a range of virtual addresses, using
2830  * callbacks to allocate and copy memory from source to destination. First it
2831  * collects all the pages backing each virtual address in the range, saving this
2832  * inside the src array. Then it locks those pages and unmaps them. Once the pages
2833  * are locked and unmapped, it checks whether each page is pinned or not. Pages
2834  * that aren't pinned have the MIGRATE_PFN_MIGRATE flag set (by this function)
2835  * in the corresponding src array entry. It then restores any pages that are
2836  * pinned, by remapping and unlocking those pages.
2837  *
2838  * At this point it calls the alloc_and_copy() callback. For documentation on
2839  * what is expected from that callback, see struct migrate_vma_ops comments in
2840  * include/linux/migrate.h
2841  *
2842  * After the alloc_and_copy() callback, this function goes over each entry in
2843  * the src array that has both the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
2844  * flags set. If the corresponding entry in the dst array has the
2845  * MIGRATE_PFN_VALID flag set, the function tries to migrate the struct page
2846  * information from the source struct page to the destination struct page. If
2847  * that fails, it clears the MIGRATE_PFN_MIGRATE flag in the corresponding src
2848  * array entry.
2849  *
2850  * At this point all successfully migrated pages have a src array entry with
2851  * both the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags set, and a dst
2852  * array entry with the MIGRATE_PFN_VALID flag set.
2853  *
2854  * It then calls the finalize_and_map() callback. See the comments for struct
2855  * migrate_vma_ops in include/linux/migrate.h for details about
2856  * finalize_and_map() behavior.
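 *
 * As a hedged sketch of that step (my_finalize_and_map() and the device-side
 * bookkeeping are assumptions, not defined in this file), the callback can
 * simply walk the arrays and treat every src entry that still has
 * MIGRATE_PFN_MIGRATE set as successfully migrated:
 *
 *	// hypothetical driver callback, shown here for illustration only
 *	static void my_finalize_and_map(struct vm_area_struct *vma,
 *					const unsigned long *src,
 *					const unsigned long *dst,
 *					unsigned long start,
 *					unsigned long end,
 *					void *private)
 *	{
 *		unsigned long i, npages = (end - start) >> PAGE_SHIFT;
 *
 *		for (i = 0; i < npages; i++) {
 *			if (!(src[i] & MIGRATE_PFN_MIGRATE))
 *				continue;	// this entry was not migrated
 *			// update driver-private tracking for dst[i] here
 *		}
 *	}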
2857  *
2858  * After the finalize_and_map() callback, for successfully migrated pages, this
2859  * function updates the CPU page table to point to new pages, otherwise it
2860  * restores the CPU page table to point to the original source pages.
2861  *
2862  * The function returns 0 after the above steps, even if no pages were migrated
2863  * (it only returns an error if any of the arguments are invalid).
2864  *
2865  * Both the src and dst arrays must be big enough to hold
2866  * (end - start) >> PAGE_SHIFT unsigned long entries.
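 *
 * A minimal calling sketch, under the assumption that the caller already holds
 * the mmap_sem for read and that my_alloc_and_copy()/my_finalize_and_map() are
 * the driver callbacks sketched above (the kcalloc()-based array allocation is
 * likewise only illustrative):
 *
 *	static const struct migrate_vma_ops my_migrate_ops = {
 *		.alloc_and_copy		= my_alloc_and_copy,
 *		.finalize_and_map	= my_finalize_and_map,
 *	};
 *
 *	unsigned long npages = (end - start) >> PAGE_SHIFT;
 *	unsigned long *src, *dst;
 *	int ret = -ENOMEM;
 *
 *	src = kcalloc(npages, sizeof(*src), GFP_KERNEL);
 *	dst = kcalloc(npages, sizeof(*dst), GFP_KERNEL);
 *	if (src && dst)
 *		ret = migrate_vma(&my_migrate_ops, vma, start, end,
 *				  src, dst, private);
 *	kfree(dst);
 *	kfree(src);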
2867  */
2868 int migrate_vma(const struct migrate_vma_ops *ops,
2869                 struct vm_area_struct *vma,
2870                 unsigned long start,
2871                 unsigned long end,
2872                 unsigned long *src,
2873                 unsigned long *dst,
2874                 void *private)
2875 {
2876         struct migrate_vma migrate;
2877
2878         /* Sanity check the arguments */
2879         start &= PAGE_MASK;
2880         end &= PAGE_MASK;
2881         if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
2882                         vma_is_dax(vma))
2883                 return -EINVAL;
2884         if (start < vma->vm_start || start >= vma->vm_end)
2885                 return -EINVAL;
2886         if (end <= vma->vm_start || end > vma->vm_end)
2887                 return -EINVAL;
2888         if (!ops || !src || !dst || start >= end)
2889                 return -EINVAL;
2890
2891         memset(src, 0, sizeof(*src) * ((end - start) >> PAGE_SHIFT));
2892         migrate.src = src;
2893         migrate.dst = dst;
2894         migrate.start = start;
2895         migrate.npages = 0;
2896         migrate.cpages = 0;
2897         migrate.end = end;
2898         migrate.vma = vma;
2899
2900         /* Collect, and try to unmap source pages */
2901         migrate_vma_collect(&migrate);
2902         if (!migrate.cpages)
2903                 return 0;
2904
2905         /* Lock and isolate page */
2906         migrate_vma_prepare(&migrate);
2907         if (!migrate.cpages)
2908                 return 0;
2909
2910         /* Unmap pages */
2911         migrate_vma_unmap(&migrate);
2912         if (!migrate.cpages)
2913                 return 0;
2914
2915         /*
2916          * At this point pages are locked and unmapped, and thus they have
2917          * stable content and can safely be copied to destination memory that
2918          * is allocated by the callback.
2919          *
2920          * Note that migration can fail in migrate_vma_pages() for each
2921          * individual page.
2922          */
2923         ops->alloc_and_copy(vma, src, dst, start, end, private);
2924
2925         /* This does the real migration of struct page */
2926         migrate_vma_pages(&migrate);
2927
2928         ops->finalize_and_map(vma, src, dst, start, end, private);
2929
2930         /* Unlock and remap pages */
2931         migrate_vma_finalize(&migrate);
2932
2933         return 0;
2934 }
2935 EXPORT_SYMBOL(migrate_vma);
2936 #endif /* defined(MIGRATE_VMA_HELPER) */