1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11  * Hirokazu Takahashi <taka@valinux.co.jp>
12  * Dave Hansen <haveblue@us.ibm.com>
13  * Christoph Lameter
14  */
15
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/nsproxy.h>
24 #include <linux/pagevec.h>
25 #include <linux/ksm.h>
26 #include <linux/rmap.h>
27 #include <linux/topology.h>
28 #include <linux/cpu.h>
29 #include <linux/cpuset.h>
30 #include <linux/writeback.h>
31 #include <linux/mempolicy.h>
32 #include <linux/vmalloc.h>
33 #include <linux/security.h>
34 #include <linux/backing-dev.h>
35 #include <linux/compaction.h>
36 #include <linux/syscalls.h>
37 #include <linux/compat.h>
38 #include <linux/hugetlb.h>
39 #include <linux/hugetlb_cgroup.h>
40 #include <linux/gfp.h>
41 #include <linux/pagewalk.h>
42 #include <linux/pfn_t.h>
43 #include <linux/memremap.h>
44 #include <linux/userfaultfd_k.h>
45 #include <linux/balloon_compaction.h>
46 #include <linux/mmu_notifier.h>
47 #include <linux/page_idle.h>
48 #include <linux/page_owner.h>
49 #include <linux/sched/mm.h>
50 #include <linux/ptrace.h>
51 #include <linux/oom.h>
52
53 #include <asm/tlbflush.h>
54
55 #define CREATE_TRACE_POINTS
56 #include <trace/events/migrate.h>
57
58 #include "internal.h"
59
60 /*
61  * migrate_prep() needs to be called before we start compiling a list of pages
62  * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
63  * undesirable, use migrate_prep_local() instead.
64  */
65 void migrate_prep(void)
66 {
67         /*
68          * Clear the LRU lists so pages can be isolated.
69          * Note that pages may be moved off the LRU after we have
70          * drained them. Those pages will fail to migrate like other
71          * pages that may be busy.
72          */
73         lru_add_drain_all();
74 }
75
76 /* Do the necessary work of migrate_prep but not if it involves other CPUs */
77 void migrate_prep_local(void)
78 {
79         lru_add_drain();
80 }
81
82 int isolate_movable_page(struct page *page, isolate_mode_t mode)
83 {
84         struct address_space *mapping;
85
86         /*
87          * Avoid burning cycles with pages that are yet under __free_pages(),
88          * or just got freed under us.
89          *
90  * In case we 'win' a race for a movable page being freed under us and
91  * raise its refcount, preventing __free_pages() from doing its job,
92  * the put_page() at the end of this block will take care of
93  * releasing this page, thus avoiding a nasty leak.
94          */
95         if (unlikely(!get_page_unless_zero(page)))
96                 goto out;
97
98         /*
99          * Check PageMovable before taking the PG_lock because the page's owner
100          * assumes that nobody touches the PG_lock of a newly allocated page,
101          * so unconditionally grabbing the lock would break that assumption.
102          */
103         if (unlikely(!__PageMovable(page)))
104                 goto out_putpage;
105         /*
106          * As movable pages are not isolated from LRU lists, concurrent
107          * compaction threads can race against page migration functions
108          * as well as race against a page being released.
109          *
110          * In order to avoid having an already isolated movable page
111          * being (wrongly) re-isolated while it is under migration,
112          * or to avoid attempting to isolate pages being released,
113          * let's be sure we have the page lock
114          * before proceeding with the movable page isolation steps.
115          */
116         if (unlikely(!trylock_page(page)))
117                 goto out_putpage;
118
119         if (!PageMovable(page) || PageIsolated(page))
120                 goto out_no_isolated;
121
122         mapping = page_mapping(page);
123         VM_BUG_ON_PAGE(!mapping, page);
124
125         if (!mapping->a_ops->isolate_page(page, mode))
126                 goto out_no_isolated;
127
128         /* Driver shouldn't use PG_isolated bit of page->flags */
129         WARN_ON_ONCE(PageIsolated(page));
130         __SetPageIsolated(page);
131         unlock_page(page);
132
133         return 0;
134
135 out_no_isolated:
136         unlock_page(page);
137 out_putpage:
138         put_page(page);
139 out:
140         return -EBUSY;
141 }
142
143 /* This should be called on a page that is PG_movable */
144 void putback_movable_page(struct page *page)
145 {
146         struct address_space *mapping;
147
148         VM_BUG_ON_PAGE(!PageLocked(page), page);
149         VM_BUG_ON_PAGE(!PageMovable(page), page);
150         VM_BUG_ON_PAGE(!PageIsolated(page), page);
151
152         mapping = page_mapping(page);
153         mapping->a_ops->putback_page(page);
154         __ClearPageIsolated(page);
155 }
156
157 /*
158  * Put previously isolated pages back onto the appropriate lists
159  * from where they were once taken off for compaction/migration.
160  *
161  * This function shall be used whenever the isolated pageset has been
162  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
163  * and isolate_huge_page().
164  */
165 void putback_movable_pages(struct list_head *l)
166 {
167         struct page *page;
168         struct page *page2;
169
170         list_for_each_entry_safe(page, page2, l, lru) {
171                 if (unlikely(PageHuge(page))) {
172                         putback_active_hugepage(page);
173                         continue;
174                 }
175                 list_del(&page->lru);
176                 /*
177                  * We isolated a non-LRU movable page, so here we can use
178                  * __PageMovable because an LRU page's mapping cannot have
179                  * PAGE_MAPPING_MOVABLE set.
180                  */
181                 if (unlikely(__PageMovable(page))) {
182                         VM_BUG_ON_PAGE(!PageIsolated(page), page);
183                         lock_page(page);
184                         if (PageMovable(page))
185                                 putback_movable_page(page);
186                         else
187                                 __ClearPageIsolated(page);
188                         unlock_page(page);
189                         put_page(page);
190                 } else {
191                         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
192                                         page_is_file_lru(page), -thp_nr_pages(page));
193                         putback_lru_page(page);
194                 }
195         }
196 }
197
198 /*
199  * Restore a potential migration pte to a working pte entry
200  */
201 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
202                                  unsigned long addr, void *old)
203 {
204         struct page_vma_mapped_walk pvmw = {
205                 .page = old,
206                 .vma = vma,
207                 .address = addr,
208                 .flags = PVMW_SYNC | PVMW_MIGRATION,
209         };
210         struct page *new;
211         pte_t pte;
212         swp_entry_t entry;
213
214         VM_BUG_ON_PAGE(PageTail(page), page);
215         while (page_vma_mapped_walk(&pvmw)) {
216                 if (PageKsm(page))
217                         new = page;
218                 else
219                         new = page - pvmw.page->index +
220                                 linear_page_index(vma, pvmw.address);
221
222 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
223                 /* PMD-mapped THP migration entry */
224                 if (!pvmw.pte) {
225                         VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
226                         remove_migration_pmd(&pvmw, new);
227                         continue;
228                 }
229 #endif
230
231                 get_page(new);
232                 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
233                 if (pte_swp_soft_dirty(*pvmw.pte))
234                         pte = pte_mksoft_dirty(pte);
235
236                 /*
237                  * Recheck VMA as permissions can change since migration started
238                  */
239                 entry = pte_to_swp_entry(*pvmw.pte);
240                 if (is_write_migration_entry(entry))
241                         pte = maybe_mkwrite(pte, vma);
242                 else if (pte_swp_uffd_wp(*pvmw.pte))
243                         pte = pte_mkuffd_wp(pte);
244
245                 if (unlikely(is_device_private_page(new))) {
246                         entry = make_device_private_entry(new, pte_write(pte));
247                         pte = swp_entry_to_pte(entry);
248                         if (pte_swp_soft_dirty(*pvmw.pte))
249                                 pte = pte_swp_mksoft_dirty(pte);
250                         if (pte_swp_uffd_wp(*pvmw.pte))
251                                 pte = pte_swp_mkuffd_wp(pte);
252                 }
253
254 #ifdef CONFIG_HUGETLB_PAGE
255                 if (PageHuge(new)) {
256                         pte = pte_mkhuge(pte);
257                         pte = arch_make_huge_pte(pte, vma, new, 0);
258                         set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
259                         if (PageAnon(new))
260                                 hugepage_add_anon_rmap(new, vma, pvmw.address);
261                         else
262                                 page_dup_rmap(new, true);
263                 } else
264 #endif
265                 {
266                         set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
267
268                         if (PageAnon(new))
269                                 page_add_anon_rmap(new, vma, pvmw.address, false);
270                         else
271                                 page_add_file_rmap(new, false);
272                 }
273                 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
274                         mlock_vma_page(new);
275
276                 if (PageTransHuge(page) && PageMlocked(page))
277                         clear_page_mlock(page);
278
279                 /* No need to invalidate - it was non-present before */
280                 update_mmu_cache(vma, pvmw.address, pvmw.pte);
281         }
282
283         return true;
284 }
285
286 /*
287  * Get rid of all migration entries and replace them by
288  * Get rid of all migration entries and replace them with
289  */
290 void remove_migration_ptes(struct page *old, struct page *new, bool locked)
291 {
292         struct rmap_walk_control rwc = {
293                 .rmap_one = remove_migration_pte,
294                 .arg = old,
295         };
296
297         if (locked)
298                 rmap_walk_locked(new, &rwc);
299         else
300                 rmap_walk(new, &rwc);
301 }
302
303 /*
304  * Something used the pte of a page under migration. We need to
305  * get to the page and wait until migration is finished.
306  * When we return from this function the fault will be retried.
307  */
308 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
309                                 spinlock_t *ptl)
310 {
311         pte_t pte;
312         swp_entry_t entry;
313         struct page *page;
314
315         spin_lock(ptl);
316         pte = *ptep;
317         if (!is_swap_pte(pte))
318                 goto out;
319
320         entry = pte_to_swp_entry(pte);
321         if (!is_migration_entry(entry))
322                 goto out;
323
324         page = migration_entry_to_page(entry);
325
326         /*
327          * Once the page cache replacement step of migration has started,
328          * page_count is zero; but we must not call put_and_wait_on_page_locked()
329          * without a ref. Use get_page_unless_zero(), and just fault again if it fails.
330          */
331         if (!get_page_unless_zero(page))
332                 goto out;
333         pte_unmap_unlock(ptep, ptl);
334         put_and_wait_on_page_locked(page);
335         return;
336 out:
337         pte_unmap_unlock(ptep, ptl);
338 }
339
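/*
 * Look up the pte for @address and its page table lock, then wait for the
 * migration entry via __migration_entry_wait().  The hugetlb variant below
 * does the same using the huge_pte lock.
 */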
340 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
341                                 unsigned long address)
342 {
343         spinlock_t *ptl = pte_lockptr(mm, pmd);
344         pte_t *ptep = pte_offset_map(pmd, address);
345         __migration_entry_wait(mm, ptep, ptl);
346 }
347
348 void migration_entry_wait_huge(struct vm_area_struct *vma,
349                 struct mm_struct *mm, pte_t *pte)
350 {
351         spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
352         __migration_entry_wait(mm, pte, ptl);
353 }
354
355 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
356 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
357 {
358         spinlock_t *ptl;
359         struct page *page;
360
361         ptl = pmd_lock(mm, pmd);
362         if (!is_pmd_migration_entry(*pmd))
363                 goto unlock;
364         page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
365         if (!get_page_unless_zero(page))
366                 goto unlock;
367         spin_unlock(ptl);
368         put_and_wait_on_page_locked(page);
369         return;
370 unlock:
371         spin_unlock(ptl);
372 }
373 #endif
374
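/*
 * Number of references a page is expected to have while it is being migrated:
 * one baseline reference, one extra for ZONE_DEVICE private pages, and, when
 * the page belongs to a mapping, one per subpage held by the page cache plus
 * one if private data is attached.
 */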
375 static int expected_page_refs(struct address_space *mapping, struct page *page)
376 {
377         int expected_count = 1;
378
379         /*
380          * Device private pages have an extra refcount as they are
381          * ZONE_DEVICE pages.
382          */
383         expected_count += is_device_private_page(page);
384         if (mapping)
385                 expected_count += thp_nr_pages(page) + page_has_private(page);
386
387         return expected_count;
388 }
389
390 /*
391  * Replace the page in the mapping.
392  *
393  * The number of remaining references must be:
394  * 1 for anonymous pages without a mapping
395  * 2 for pages with a mapping
396  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
397  */
398 int migrate_page_move_mapping(struct address_space *mapping,
399                 struct page *newpage, struct page *page, int extra_count)
400 {
401         XA_STATE(xas, &mapping->i_pages, page_index(page));
402         struct zone *oldzone, *newzone;
403         int dirty;
404         int expected_count = expected_page_refs(mapping, page) + extra_count;
405         int nr = thp_nr_pages(page);
406
407         if (!mapping) {
408                 /* Anonymous page without mapping */
409                 if (page_count(page) != expected_count)
410                         return -EAGAIN;
411
412                 /* No turning back from here */
413                 newpage->index = page->index;
414                 newpage->mapping = page->mapping;
415                 if (PageSwapBacked(page))
416                         __SetPageSwapBacked(newpage);
417
418                 return MIGRATEPAGE_SUCCESS;
419         }
420
421         oldzone = page_zone(page);
422         newzone = page_zone(newpage);
423
424         xas_lock_irq(&xas);
425         if (page_count(page) != expected_count || xas_load(&xas) != page) {
426                 xas_unlock_irq(&xas);
427                 return -EAGAIN;
428         }
429
430         if (!page_ref_freeze(page, expected_count)) {
431                 xas_unlock_irq(&xas);
432                 return -EAGAIN;
433         }
434
435         /*
436          * Now we know that no one else is looking at the page:
437          * no turning back from here.
438          */
439         newpage->index = page->index;
440         newpage->mapping = page->mapping;
441         page_ref_add(newpage, nr); /* add cache reference */
442         if (PageSwapBacked(page)) {
443                 __SetPageSwapBacked(newpage);
444                 if (PageSwapCache(page)) {
445                         SetPageSwapCache(newpage);
446                         set_page_private(newpage, page_private(page));
447                 }
448         } else {
449                 VM_BUG_ON_PAGE(PageSwapCache(page), page);
450         }
451
452         /* Move dirty while page refs frozen and newpage not yet exposed */
453         dirty = PageDirty(page);
454         if (dirty) {
455                 ClearPageDirty(page);
456                 SetPageDirty(newpage);
457         }
458
459         xas_store(&xas, newpage);
460         if (PageTransHuge(page)) {
461                 int i;
462
463                 for (i = 1; i < nr; i++) {
464                         xas_next(&xas);
465                         xas_store(&xas, newpage);
466                 }
467         }
468
469         /*
470          * Drop cache reference from old page by unfreezing
471          * to one less reference.
472          * We know this isn't the last reference.
473          */
474         page_ref_unfreeze(page, expected_count - nr);
475
476         xas_unlock(&xas);
477         /* Leave irq disabled to prevent preemption while updating stats */
478
479         /*
480          * If moved to a different zone then also account
481          * the page for that zone. Other VM counters will be
482          * taken care of when we establish references to the
483          * new page and drop references to the old page.
484          *
485          * Note that anonymous pages are accounted for
486          * via NR_FILE_PAGES and NR_ANON_MAPPED if they
487          * are mapped to swap space.
488          */
489         if (newzone != oldzone) {
490                 struct lruvec *old_lruvec, *new_lruvec;
491                 struct mem_cgroup *memcg;
492
493                 memcg = page_memcg(page);
494                 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
495                 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
496
497                 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
498                 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
499                 if (PageSwapBacked(page) && !PageSwapCache(page)) {
500                         __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
501                         __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
502                 }
503                 if (dirty && mapping_can_writeback(mapping)) {
504                         __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
505                         __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
506                         __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
507                         __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
508                 }
509         }
510         local_irq_enable();
511
512         return MIGRATEPAGE_SUCCESS;
513 }
514 EXPORT_SYMBOL(migrate_page_move_mapping);
515
516 /*
517  * The expected number of remaining references is the same as that
518  * of migrate_page_move_mapping().
519  */
520 int migrate_huge_page_move_mapping(struct address_space *mapping,
521                                    struct page *newpage, struct page *page)
522 {
523         XA_STATE(xas, &mapping->i_pages, page_index(page));
524         int expected_count;
525
526         xas_lock_irq(&xas);
527         expected_count = 2 + page_has_private(page);
528         if (page_count(page) != expected_count || xas_load(&xas) != page) {
529                 xas_unlock_irq(&xas);
530                 return -EAGAIN;
531         }
532
533         if (!page_ref_freeze(page, expected_count)) {
534                 xas_unlock_irq(&xas);
535                 return -EAGAIN;
536         }
537
538         newpage->index = page->index;
539         newpage->mapping = page->mapping;
540
541         get_page(newpage);
542
543         xas_store(&xas, newpage);
544
545         page_ref_unfreeze(page, expected_count - 1);
546
547         xas_unlock_irq(&xas);
548
549         return MIGRATEPAGE_SUCCESS;
550 }
551
552 /*
553  * Gigantic pages are so large that we do not guarantee that page++ pointer
554  * arithmetic will work across the entire page.  We need something more
555  * specialized.
556  */
557 static void __copy_gigantic_page(struct page *dst, struct page *src,
558                                 int nr_pages)
559 {
560         int i;
561         struct page *dst_base = dst;
562         struct page *src_base = src;
563
564         for (i = 0; i < nr_pages; ) {
565                 cond_resched();
566                 copy_highpage(dst, src);
567
568                 i++;
569                 dst = mem_map_next(dst, dst_base, i);
570                 src = mem_map_next(src, src_base, i);
571         }
572 }
573
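/*
 * Copy the contents of a hugetlb or THP page.  Gigantic hugetlb pages are
 * handed to __copy_gigantic_page(), which walks the mem_map safely instead
 * of relying on page++ arithmetic.
 */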
574 static void copy_huge_page(struct page *dst, struct page *src)
575 {
576         int i;
577         int nr_pages;
578
579         if (PageHuge(src)) {
580                 /* hugetlbfs page */
581                 struct hstate *h = page_hstate(src);
582                 nr_pages = pages_per_huge_page(h);
583
584                 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
585                         __copy_gigantic_page(dst, src, nr_pages);
586                         return;
587                 }
588         } else {
589                 /* thp page */
590                 BUG_ON(!PageTransHuge(src));
591                 nr_pages = thp_nr_pages(src);
592         }
593
594         for (i = 0; i < nr_pages; i++) {
595                 cond_resched();
596                 copy_highpage(dst + i, src + i);
597         }
598 }
599
600 /*
601  * Copy the page to its new location
602  */
603 void migrate_page_states(struct page *newpage, struct page *page)
604 {
605         int cpupid;
606
607         if (PageError(page))
608                 SetPageError(newpage);
609         if (PageReferenced(page))
610                 SetPageReferenced(newpage);
611         if (PageUptodate(page))
612                 SetPageUptodate(newpage);
613         if (TestClearPageActive(page)) {
614                 VM_BUG_ON_PAGE(PageUnevictable(page), page);
615                 SetPageActive(newpage);
616         } else if (TestClearPageUnevictable(page))
617                 SetPageUnevictable(newpage);
618         if (PageWorkingset(page))
619                 SetPageWorkingset(newpage);
620         if (PageChecked(page))
621                 SetPageChecked(newpage);
622         if (PageMappedToDisk(page))
623                 SetPageMappedToDisk(newpage);
624
625         /* Move the dirty flag for pages not handled by migrate_page_move_mapping() */
626         if (PageDirty(page))
627                 SetPageDirty(newpage);
628
629         if (page_is_young(page))
630                 set_page_young(newpage);
631         if (page_is_idle(page))
632                 set_page_idle(newpage);
633
634         /*
635          * Copy NUMA information to the new page, to prevent over-eager
636          * future migrations of this same page.
637          */
638         cpupid = page_cpupid_xchg_last(page, -1);
639         page_cpupid_xchg_last(newpage, cpupid);
640
641         ksm_migrate_page(newpage, page);
642         /*
643          * Please do not reorder this without considering how mm/ksm.c's
644          * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
645          */
646         if (PageSwapCache(page))
647                 ClearPageSwapCache(page);
648         ClearPagePrivate(page);
649         set_page_private(page, 0);
650
651         /*
652          * If any waiters have accumulated on the new page then
653          * wake them up.
654          */
655         if (PageWriteback(newpage))
656                 end_page_writeback(newpage);
657
658         /*
659          * PG_readahead shares the same bit with PG_reclaim.  The above
660          * end_page_writeback() may clear PG_readahead mistakenly, so set the
661          * bit after that.
662          */
663         if (PageReadahead(page))
664                 SetPageReadahead(newpage);
665
666         copy_page_owner(page, newpage);
667
668         if (!PageHuge(page))
669                 mem_cgroup_migrate(page, newpage);
670 }
671 EXPORT_SYMBOL(migrate_page_states);
672
673 void migrate_page_copy(struct page *newpage, struct page *page)
674 {
675         if (PageHuge(page) || PageTransHuge(page))
676                 copy_huge_page(newpage, page);
677         else
678                 copy_highpage(newpage, page);
679
680         migrate_page_states(newpage, page);
681 }
682 EXPORT_SYMBOL(migrate_page_copy);
683
684 /************************************************************
685  *                    Migration functions
686  ***********************************************************/
687
688 /*
689  * Common logic to directly migrate a single LRU page suitable for
690  * pages that do not use PagePrivate/PagePrivate2.
691  *
692  * Pages are locked upon entry and exit.
693  */
694 int migrate_page(struct address_space *mapping,
695                 struct page *newpage, struct page *page,
696                 enum migrate_mode mode)
697 {
698         int rc;
699
700         BUG_ON(PageWriteback(page));    /* Writeback must be complete */
701
702         rc = migrate_page_move_mapping(mapping, newpage, page, 0);
703
704         if (rc != MIGRATEPAGE_SUCCESS)
705                 return rc;
706
707         if (mode != MIGRATE_SYNC_NO_COPY)
708                 migrate_page_copy(newpage, page);
709         else
710                 migrate_page_states(newpage, page);
711         return MIGRATEPAGE_SUCCESS;
712 }
713 EXPORT_SYMBOL(migrate_page);
714
715 #ifdef CONFIG_BLOCK
716 /* Returns true if all buffers are successfully locked */
717 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
718                                                         enum migrate_mode mode)
719 {
720         struct buffer_head *bh = head;
721
722         /* Simple case, sync compaction */
723         if (mode != MIGRATE_ASYNC) {
724                 do {
725                         lock_buffer(bh);
726                         bh = bh->b_this_page;
727
728                 } while (bh != head);
729
730                 return true;
731         }
732
733         /* async case, we cannot block on lock_buffer so use trylock_buffer */
734         do {
735                 if (!trylock_buffer(bh)) {
736                         /*
737                          * We failed to lock the buffer and cannot stall in
738                          * async migration. Release the taken locks
739                          */
740                         struct buffer_head *failed_bh = bh;
741                         bh = head;
742                         while (bh != failed_bh) {
743                                 unlock_buffer(bh);
744                                 bh = bh->b_this_page;
745                         }
746                         return false;
747                 }
748
749                 bh = bh->b_this_page;
750         } while (bh != head);
751         return true;
752 }
753
754 static int __buffer_migrate_page(struct address_space *mapping,
755                 struct page *newpage, struct page *page, enum migrate_mode mode,
756                 bool check_refs)
757 {
758         struct buffer_head *bh, *head;
759         int rc;
760         int expected_count;
761
762         if (!page_has_buffers(page))
763                 return migrate_page(mapping, newpage, page, mode);
764
765         /* Check whether the page has no extra refs before we do more work */
766         expected_count = expected_page_refs(mapping, page);
767         if (page_count(page) != expected_count)
768                 return -EAGAIN;
769
770         head = page_buffers(page);
771         if (!buffer_migrate_lock_buffers(head, mode))
772                 return -EAGAIN;
773
774         if (check_refs) {
775                 bool busy;
776                 bool invalidated = false;
777
778 recheck_buffers:
779                 busy = false;
780                 spin_lock(&mapping->private_lock);
781                 bh = head;
782                 do {
783                         if (atomic_read(&bh->b_count)) {
784                                 busy = true;
785                                 break;
786                         }
787                         bh = bh->b_this_page;
788                 } while (bh != head);
789                 if (busy) {
790                         if (invalidated) {
791                                 rc = -EAGAIN;
792                                 goto unlock_buffers;
793                         }
794                         spin_unlock(&mapping->private_lock);
795                         invalidate_bh_lrus();
796                         invalidated = true;
797                         goto recheck_buffers;
798                 }
799         }
800
801         rc = migrate_page_move_mapping(mapping, newpage, page, 0);
802         if (rc != MIGRATEPAGE_SUCCESS)
803                 goto unlock_buffers;
804
805         attach_page_private(newpage, detach_page_private(page));
806
807         bh = head;
808         do {
809                 set_bh_page(bh, newpage, bh_offset(bh));
810                 bh = bh->b_this_page;
811
812         } while (bh != head);
813
814         if (mode != MIGRATE_SYNC_NO_COPY)
815                 migrate_page_copy(newpage, page);
816         else
817                 migrate_page_states(newpage, page);
818
819         rc = MIGRATEPAGE_SUCCESS;
820 unlock_buffers:
821         if (check_refs)
822                 spin_unlock(&mapping->private_lock);
823         bh = head;
824         do {
825                 unlock_buffer(bh);
826                 bh = bh->b_this_page;
827
828         } while (bh != head);
829
830         return rc;
831 }
832
833 /*
834  * Migration function for pages with buffers. This function can only be used
835  * if the underlying filesystem guarantees that no other references to "page"
836  * exist. For example, attached buffer heads are accessed only under page lock.
837  */
838 int buffer_migrate_page(struct address_space *mapping,
839                 struct page *newpage, struct page *page, enum migrate_mode mode)
840 {
841         return __buffer_migrate_page(mapping, newpage, page, mode, false);
842 }
843 EXPORT_SYMBOL(buffer_migrate_page);
844
845 /*
846  * Same as above except that this variant is more careful and checks that there
847  * are also no buffer head references. This function is the right one for
848  * mappings where buffer heads are directly looked up and referenced (such as
849  * block device mappings).
850  */
851 int buffer_migrate_page_norefs(struct address_space *mapping,
852                 struct page *newpage, struct page *page, enum migrate_mode mode)
853 {
854         return __buffer_migrate_page(mapping, newpage, page, mode, true);
855 }
856 #endif
857
858 /*
859  * Write back a page to clear its dirty state
860  */
861 static int writeout(struct address_space *mapping, struct page *page)
862 {
863         struct writeback_control wbc = {
864                 .sync_mode = WB_SYNC_NONE,
865                 .nr_to_write = 1,
866                 .range_start = 0,
867                 .range_end = LLONG_MAX,
868                 .for_reclaim = 1
869         };
870         int rc;
871
872         if (!mapping->a_ops->writepage)
873                 /* No write method for the address space */
874                 return -EINVAL;
875
876         if (!clear_page_dirty_for_io(page))
877                 /* Someone else already triggered a write */
878                 return -EAGAIN;
879
880         /*
881          * A dirty page may imply that the underlying filesystem has
882          * the page on some queue. So the page must be clean for
883  * migration. Writeout may mean we lose the lock and the
884          * page state is no longer what we checked for earlier.
885          * At this point we know that the migration attempt cannot
886          * be successful.
887          */
888         remove_migration_ptes(page, page, false);
889
890         rc = mapping->a_ops->writepage(page, &wbc);
891
892         if (rc != AOP_WRITEPAGE_ACTIVATE)
893                 /* unlocked. Relock */
894                 lock_page(page);
895
896         return (rc < 0) ? -EIO : -EAGAIN;
897 }
898
899 /*
900  * Default handling if a filesystem does not provide a migration function.
901  */
902 static int fallback_migrate_page(struct address_space *mapping,
903         struct page *newpage, struct page *page, enum migrate_mode mode)
904 {
905         if (PageDirty(page)) {
906                 /* Only write back pages in full synchronous migration */
907                 switch (mode) {
908                 case MIGRATE_SYNC:
909                 case MIGRATE_SYNC_NO_COPY:
910                         break;
911                 default:
912                         return -EBUSY;
913                 }
914                 return writeout(mapping, page);
915         }
916
917         /*
918          * Buffers may be managed in a filesystem specific way.
919          * We must have no buffers or drop them.
920          */
921         if (page_has_private(page) &&
922             !try_to_release_page(page, GFP_KERNEL))
923                 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
924
925         return migrate_page(mapping, newpage, page, mode);
926 }
927
928 /*
929  * Move a page to a newly allocated page.
930  * The page is locked and all ptes have been successfully removed.
931  *
932  * The new page will have replaced the old page if this function
933  * is successful.
934  *
935  * Return value:
936  *   < 0 - error code
937  *  MIGRATEPAGE_SUCCESS - success
938  */
939 static int move_to_new_page(struct page *newpage, struct page *page,
940                                 enum migrate_mode mode)
941 {
942         struct address_space *mapping;
943         int rc = -EAGAIN;
944         bool is_lru = !__PageMovable(page);
945
946         VM_BUG_ON_PAGE(!PageLocked(page), page);
947         VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
948
949         mapping = page_mapping(page);
950
951         if (likely(is_lru)) {
952                 if (!mapping)
953                         rc = migrate_page(mapping, newpage, page, mode);
954                 else if (mapping->a_ops->migratepage)
955                         /*
956                          * Most pages have a mapping and most filesystems
957                          * provide a migratepage callback. Anonymous pages
958                          * are part of swap space which also has its own
959                          * migratepage callback. This is the most common path
960                          * for page migration.
961                          */
962                         rc = mapping->a_ops->migratepage(mapping, newpage,
963                                                         page, mode);
964                 else
965                         rc = fallback_migrate_page(mapping, newpage,
966                                                         page, mode);
967         } else {
968                 /*
969                  * In the case of a non-LRU page, it could have been released
970                  * after the isolation step. In that case, we shouldn't try migration.
971                  */
972                 VM_BUG_ON_PAGE(!PageIsolated(page), page);
973                 if (!PageMovable(page)) {
974                         rc = MIGRATEPAGE_SUCCESS;
975                         __ClearPageIsolated(page);
976                         goto out;
977                 }
978
979                 rc = mapping->a_ops->migratepage(mapping, newpage,
980                                                 page, mode);
981                 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
982                         !PageIsolated(page));
983         }
984
985         /*
986          * When successful, old pagecache page->mapping must be cleared before
987          * page is freed; but stats require that PageAnon be left as PageAnon.
988          */
989         if (rc == MIGRATEPAGE_SUCCESS) {
990                 if (__PageMovable(page)) {
991                         VM_BUG_ON_PAGE(!PageIsolated(page), page);
992
993                         /*
994                          * We clear PG_movable under page_lock so any compactor
995                          * cannot try to migrate this page.
996                          */
997                         __ClearPageIsolated(page);
998                 }
999
1000                 /*
1001                  * Anonymous and movable page->mapping will be cleared by
1002                  * free_pages_prepare so don't reset it here; this keeps
1003                  * type checks such as PageAnon working.
1004                  */
1005                 if (!PageMappingFlags(page))
1006                         page->mapping = NULL;
1007
1008                 if (likely(!is_zone_device_page(newpage)))
1009                         flush_dcache_page(newpage);
1010
1011         }
1012 out:
1013         return rc;
1014 }
1015
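/*
 * Core of single-page migration: lock both the old and the new page, replace
 * the old page's ptes with migration entries, move the contents and state
 * over with move_to_new_page(), and finally remove the migration ptes,
 * pointing them at the new page on success or back at the old page on failure.
 */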
1016 static int __unmap_and_move(struct page *page, struct page *newpage,
1017                                 int force, enum migrate_mode mode)
1018 {
1019         int rc = -EAGAIN;
1020         int page_was_mapped = 0;
1021         struct anon_vma *anon_vma = NULL;
1022         bool is_lru = !__PageMovable(page);
1023
1024         if (!trylock_page(page)) {
1025                 if (!force || mode == MIGRATE_ASYNC)
1026                         goto out;
1027
1028                 /*
1029                  * It's not safe for direct compaction to call lock_page.
1030                  * For example, during page readahead pages are added locked
1031                  * to the LRU. Later, when the IO completes the pages are
1032                  * marked uptodate and unlocked. However, the queueing
1033                  * could be merging multiple pages for one bio (e.g.
1034                  * mpage_readahead). If an allocation happens for the
1035                  * second or third page, the process can end up locking
1036                  * the same page twice and deadlocking. Rather than
1037                  * trying to be clever about what pages can be locked,
1038                  * avoid the use of lock_page for direct compaction
1039                  * altogether.
1040                  */
1041                 if (current->flags & PF_MEMALLOC)
1042                         goto out;
1043
1044                 lock_page(page);
1045         }
1046
1047         if (PageWriteback(page)) {
1048                 /*
1049                  * Only in the case of a full synchronous migration is it
1050                  * necessary to wait for PageWriteback. In the async case,
1051                  * the retry loop is too short and in the sync-light case,
1052                  * the overhead of stalling is too much
1053                  */
1054                 switch (mode) {
1055                 case MIGRATE_SYNC:
1056                 case MIGRATE_SYNC_NO_COPY:
1057                         break;
1058                 default:
1059                         rc = -EBUSY;
1060                         goto out_unlock;
1061                 }
1062                 if (!force)
1063                         goto out_unlock;
1064                 wait_on_page_writeback(page);
1065         }
1066
1067         /*
1068          * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
1069          * we cannot notice that the anon_vma is freed while we migrate a page.
1070          * This get_anon_vma() delays freeing the anon_vma pointer until the end
1071          * of migration. File cache pages are no problem because of page_lock():
1072          * file caches may use write_page() or lock_page() during migration, so
1073          * we only need to care about anon pages here.
1074          *
1075          * Only page_get_anon_vma() understands the subtleties of
1076          * getting a hold on an anon_vma from outside one of its mms.
1077          * But if we cannot get anon_vma, then we won't need it anyway,
1078          * because that implies that the anon page is no longer mapped
1079          * (and cannot be remapped so long as we hold the page lock).
1080          */
1081         if (PageAnon(page) && !PageKsm(page))
1082                 anon_vma = page_get_anon_vma(page);
1083
1084         /*
1085          * Block others from accessing the new page when we get around to
1086          * establishing additional references. We are usually the only one
1087          * holding a reference to newpage at this point. We used to have a BUG
1088          * here if trylock_page(newpage) fails, but would like to allow for
1089          * cases where there might be a race with the previous use of newpage.
1090          * This is much like races on refcount of oldpage: just don't BUG().
1091          */
1092         if (unlikely(!trylock_page(newpage)))
1093                 goto out_unlock;
1094
1095         if (unlikely(!is_lru)) {
1096                 rc = move_to_new_page(newpage, page, mode);
1097                 goto out_unlock_both;
1098         }
1099
1100         /*
1101          * Corner case handling:
1102          * 1. When a new swap-cache page is read in, it is added to the LRU
1103          * and treated as swapcache but it has no rmap yet.
1104          * Calling try_to_unmap() against a page->mapping==NULL page will
1105          * trigger a BUG.  So handle it here.
1106          * 2. An orphaned page (see truncate_cleanup_page) might have
1107          * fs-private metadata. The page can be picked up due to memory
1108          * offlining.  Everywhere else except page reclaim, the page is
1109          * invisible to the vm, so the page cannot be migrated.  So try to
1110          * free the metadata, so the page can be freed.
1111          */
1112         if (!page->mapping) {
1113                 VM_BUG_ON_PAGE(PageAnon(page), page);
1114                 if (page_has_private(page)) {
1115                         try_to_free_buffers(page);
1116                         goto out_unlock_both;
1117                 }
1118         } else if (page_mapped(page)) {
1119                 /* Establish migration ptes */
1120                 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1121                                 page);
1122                 try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK);
1123                 page_was_mapped = 1;
1124         }
1125
1126         if (!page_mapped(page))
1127                 rc = move_to_new_page(newpage, page, mode);
1128
1129         if (page_was_mapped)
1130                 remove_migration_ptes(page,
1131                         rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1132
1133 out_unlock_both:
1134         unlock_page(newpage);
1135 out_unlock:
1136         /* Drop an anon_vma reference if we took one */
1137         if (anon_vma)
1138                 put_anon_vma(anon_vma);
1139         unlock_page(page);
1140 out:
1141         /*
1142          * If migration is successful, decrease the refcount of the newpage,
1143          * which will not free the page because the new page owner increased
1144          * the refcount. Also, if it is an LRU page, add the page to the LRU
1145          * list here. Use the old state of the isolated source page to
1146          * determine if we migrated an LRU page. newpage was already unlocked
1147          * and possibly modified by its owner - don't rely on the page
1148          * state.
1149          */
1150         if (rc == MIGRATEPAGE_SUCCESS) {
1151                 if (unlikely(!is_lru))
1152                         put_page(newpage);
1153                 else
1154                         putback_lru_page(newpage);
1155         }
1156
1157         return rc;
1158 }
1159
1160 /*
1161  * Obtain the lock on page, remove all ptes and migrate the page
1162  * to the newly allocated page in newpage.
1163  */
1164 static int unmap_and_move(new_page_t get_new_page,
1165                                    free_page_t put_new_page,
1166                                    unsigned long private, struct page *page,
1167                                    int force, enum migrate_mode mode,
1168                                    enum migrate_reason reason,
1169                                    struct list_head *ret)
1170 {
1171         int rc = MIGRATEPAGE_SUCCESS;
1172         struct page *newpage = NULL;
1173
1174         if (!thp_migration_supported() && PageTransHuge(page))
1175                 return -ENOSYS;
1176
1177         if (page_count(page) == 1) {
1178                 /* page was freed from under us. So we are done. */
1179                 ClearPageActive(page);
1180                 ClearPageUnevictable(page);
1181                 if (unlikely(__PageMovable(page))) {
1182                         lock_page(page);
1183                         if (!PageMovable(page))
1184                                 __ClearPageIsolated(page);
1185                         unlock_page(page);
1186                 }
1187                 goto out;
1188         }
1189
1190         newpage = get_new_page(page, private);
1191         if (!newpage)
1192                 return -ENOMEM;
1193
1194         rc = __unmap_and_move(page, newpage, force, mode);
1195         if (rc == MIGRATEPAGE_SUCCESS)
1196                 set_page_owner_migrate_reason(newpage, reason);
1197
1198 out:
1199         if (rc != -EAGAIN) {
1200                 /*
1201                  * A page that has been migrated has all references
1202                  * removed and will be freed. A page that has not been
1203                  * migrated will have kept its references and be restored.
1204                  */
1205                 list_del(&page->lru);
1206         }
1207
1208         /*
1209          * If migration is successful, release the reference grabbed during
1210          * isolation. Otherwise, restore the page to the right list unless
1211          * we want to retry.
1212          */
1213         if (rc == MIGRATEPAGE_SUCCESS) {
1214                 /*
1215                  * Compaction can also migrate non-LRU pages which are
1216                  * not accounted to NR_ISOLATED_*. They can be recognized
1217                  * as __PageMovable.
1218                  */
1219                 if (likely(!__PageMovable(page)))
1220                         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1221                                         page_is_file_lru(page), -thp_nr_pages(page));
1222
1223                 if (reason != MR_MEMORY_FAILURE)
1224                         /*
1225                          * We release the page in page_handle_poison.
1226                          */
1227                         put_page(page);
1228         } else {
1229                 if (rc != -EAGAIN)
1230                         list_add_tail(&page->lru, ret);
1231
1232                 if (put_new_page)
1233                         put_new_page(newpage, private);
1234                 else
1235                         put_page(newpage);
1236         }
1237
1238         return rc;
1239 }
1240
1241 /*
1242  * Counterpart of unmap_and_move() for hugepage migration.
1243  *
1244  * This function doesn't wait for the completion of hugepage I/O
1245  * because there is no race between I/O and migration for hugepages.
1246  * Note that currently hugepage I/O occurs only in direct I/O
1247  * where no lock is held and PG_writeback is irrelevant,
1248  * and the writeback status of all subpages is counted in the reference
1249  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1250  * under direct I/O, the reference count of the head page is 512 and a bit more.)
1251  * This means that when we try to migrate a hugepage whose subpages are
1252  * doing direct I/O, some references remain after try_to_unmap() and
1253  * hugepage migration fails without data corruption.
1254  *
1255  * There is also no race when direct I/O is issued on a page under migration,
1256  * because then the pte is replaced with a migration swap entry and the
1257  * direct I/O code will wait in the page fault for migration to complete.
1258  */
1259 static int unmap_and_move_huge_page(new_page_t get_new_page,
1260                                 free_page_t put_new_page, unsigned long private,
1261                                 struct page *hpage, int force,
1262                                 enum migrate_mode mode, int reason,
1263                                 struct list_head *ret)
1264 {
1265         int rc = -EAGAIN;
1266         int page_was_mapped = 0;
1267         struct page *new_hpage;
1268         struct anon_vma *anon_vma = NULL;
1269         struct address_space *mapping = NULL;
1270
1271         /*
1272          * Migratability of hugepages depends on the architecture and the size
1273          * of the hugepage. This check is necessary because some callers of
1274          * hugepage migration, like soft offline and memory hotremove, don't
1275          * walk through page tables or check whether the hugepage is pmd-based
1276          * or not before kicking off migration.
1277          */
1278         if (!hugepage_migration_supported(page_hstate(hpage))) {
1279                 list_move_tail(&hpage->lru, ret);
1280                 return -ENOSYS;
1281         }
1282
1283         new_hpage = get_new_page(hpage, private);
1284         if (!new_hpage)
1285                 return -ENOMEM;
1286
1287         if (!trylock_page(hpage)) {
1288                 if (!force)
1289                         goto out;
1290                 switch (mode) {
1291                 case MIGRATE_SYNC:
1292                 case MIGRATE_SYNC_NO_COPY:
1293                         break;
1294                 default:
1295                         goto out;
1296                 }
1297                 lock_page(hpage);
1298         }
1299
1300         /*
1301          * Check for pages which are in the process of being freed.  Without
1302          * page_mapping() set, hugetlbfs specific move page routine will not
1303          * be called and we could leak usage counts for subpools.
1304          */
1305         if (page_private(hpage) && !page_mapping(hpage)) {
1306                 rc = -EBUSY;
1307                 goto out_unlock;
1308         }
1309
1310         if (PageAnon(hpage))
1311                 anon_vma = page_get_anon_vma(hpage);
1312
1313         if (unlikely(!trylock_page(new_hpage)))
1314                 goto put_anon;
1315
1316         if (page_mapped(hpage)) {
1317                 bool mapping_locked = false;
1318                 enum ttu_flags ttu = TTU_MIGRATION|TTU_IGNORE_MLOCK;
1319
1320                 if (!PageAnon(hpage)) {
1321                         /*
1322                          * In shared mappings, try_to_unmap could potentially
1323                          * call huge_pmd_unshare.  Because of this, take
1324                          * semaphore in write mode here and set TTU_RMAP_LOCKED
1325                          * to let lower levels know we have taken the lock.
1326                          */
1327                         mapping = hugetlb_page_mapping_lock_write(hpage);
1328                         if (unlikely(!mapping))
1329                                 goto unlock_put_anon;
1330
1331                         mapping_locked = true;
1332                         ttu |= TTU_RMAP_LOCKED;
1333                 }
1334
1335                 try_to_unmap(hpage, ttu);
1336                 page_was_mapped = 1;
1337
1338                 if (mapping_locked)
1339                         i_mmap_unlock_write(mapping);
1340         }
1341
1342         if (!page_mapped(hpage))
1343                 rc = move_to_new_page(new_hpage, hpage, mode);
1344
1345         if (page_was_mapped)
1346                 remove_migration_ptes(hpage,
1347                         rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
1348
1349 unlock_put_anon:
1350         unlock_page(new_hpage);
1351
1352 put_anon:
1353         if (anon_vma)
1354                 put_anon_vma(anon_vma);
1355
1356         if (rc == MIGRATEPAGE_SUCCESS) {
1357                 move_hugetlb_state(hpage, new_hpage, reason);
1358                 put_new_page = NULL;
1359         }
1360
1361 out_unlock:
1362         unlock_page(hpage);
1363 out:
1364         if (rc == MIGRATEPAGE_SUCCESS)
1365                 putback_active_hugepage(hpage);
1366         else if (rc != -EAGAIN && rc != MIGRATEPAGE_SUCCESS)
1367                 list_move_tail(&hpage->lru, ret);
1368
1369         /*
1370          * If migration was not successful and there's a freeing callback, use
1371          * it.  Otherwise, put_page() will drop the reference grabbed during
1372          * isolation.
1373          */
1374         if (put_new_page)
1375                 put_new_page(new_hpage, private);
1376         else
1377                 putback_active_hugepage(new_hpage);
1378
1379         return rc;
1380 }
1381
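/*
 * Split a THP onto @from and reset the list cursor so that the new tail pages
 * (queued behind the head) are visited later in the walk.  Returns 0 on
 * success or the error from split_huge_page_to_list().
 */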
1382 static inline int try_split_thp(struct page *page, struct page **page2,
1383                                 struct list_head *from)
1384 {
1385         int rc = 0;
1386
1387         lock_page(page);
1388         rc = split_huge_page_to_list(page, from);
1389         unlock_page(page);
1390         if (!rc)
1391                 list_safe_reset_next(page, *page2, lru);
1392
1393         return rc;
1394 }
1395
1396 /*
1397  * migrate_pages - migrate the pages specified in a list, to the free pages
1398  *                 supplied as the target for the page migration
1399  *
1400  * @from:               The list of pages to be migrated.
1401  * @get_new_page:       The function used to allocate free pages to be used
1402  *                      as the target of the page migration.
1403  * @put_new_page:       The function used to free target pages if migration
1404  *                      fails, or NULL if no special handling is necessary.
1405  * @private:            Private data to be passed on to get_new_page()
1406  * @mode:               The migration mode that specifies the constraints for
1407  *                      page migration, if any.
1408  * @reason:             The reason for page migration.
1409  *
1410  * The function returns after 10 attempts or if no pages are movable any more
1411  * because the list has become empty or no retryable pages exist.
1412  * It is the caller's responsibility to call putback_movable_pages() to return pages
1413  * to the LRU or free list only if ret != 0.
1414  *
1415  * Returns the number of pages that were not migrated, or an error code.
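 *
 * A minimal usage sketch (the "get_new" allocation callback and "private"
 * cookie are illustrative, not taken from a specific in-tree caller):
 * migrate an isolated list and put back anything that could not be migrated:
 *
 *	nr_failed = migrate_pages(&pagelist, get_new, NULL, private,
 *				  MIGRATE_SYNC, MR_SYSCALL);
 *	if (nr_failed)
 *		putback_movable_pages(&pagelist);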
1416  */
1417 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1418                 free_page_t put_new_page, unsigned long private,
1419                 enum migrate_mode mode, int reason)
1420 {
1421         int retry = 1;
1422         int thp_retry = 1;
1423         int nr_failed = 0;
1424         int nr_succeeded = 0;
1425         int nr_thp_succeeded = 0;
1426         int nr_thp_failed = 0;
1427         int nr_thp_split = 0;
1428         int pass = 0;
1429         bool is_thp = false;
1430         struct page *page;
1431         struct page *page2;
1432         int swapwrite = current->flags & PF_SWAPWRITE;
1433         int rc, nr_subpages;
1434         LIST_HEAD(ret_pages);
1435
1436         if (!swapwrite)
1437                 current->flags |= PF_SWAPWRITE;
1438
1439         for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
1440                 retry = 0;
1441                 thp_retry = 0;
1442
1443                 list_for_each_entry_safe(page, page2, from, lru) {
1444 retry:
1445                         /*
1446                          * THP statistics are based on the source huge page.
1447                          * Capture required information that might get lost
1448                          * during migration.
1449                          */
1450                         is_thp = PageTransHuge(page) && !PageHuge(page);
1451                         nr_subpages = thp_nr_pages(page);
1452                         cond_resched();
1453
1454                         if (PageHuge(page))
1455                                 rc = unmap_and_move_huge_page(get_new_page,
1456                                                 put_new_page, private, page,
1457                                                 pass > 2, mode, reason,
1458                                                 &ret_pages);
1459                         else
1460                                 rc = unmap_and_move(get_new_page, put_new_page,
1461                                                 private, page, pass > 2, mode,
1462                                                 reason, &ret_pages);
1463                         /*
1464                          * The rules are:
1465                          *      Success: a non-hugetlb page will be freed, a
1466                          *               hugetlb page will be put back
1467                          *      -EAGAIN: stay on the from list
1468                          *      -ENOMEM: stay on the from list
1469                          *      Other errno: put on ret_pages list then splice to
1470                          *                   from list
1471                          */
1472                         switch(rc) {
1473                         /*
1474                          * THP migration might be unsupported or the
1475                          * allocation could've failed so we should
1476                          * allocation could've failed, so we should
1477                          * to base pages.
1478                          *
1479                          * Head page is retried immediately and tail
1480                          * pages are added to the tail of the list so
1481                          * we encounter them after the rest of the list
1482                          * is processed.
1483                          */
1484                         case -ENOSYS:
1485                                 /* THP migration is unsupported */
1486                                 if (is_thp) {
1487                                         if (!try_split_thp(page, &page2, from)) {
1488                                                 nr_thp_split++;
1489                                                 goto retry;
1490                                         }
1491
1492                                         nr_thp_failed++;
1493                                         nr_failed += nr_subpages;
1494                                         break;
1495                                 }
1496
1497                                 /* Hugetlb migration is unsupported */
1498                                 nr_failed++;
1499                                 break;
1500                         case -ENOMEM:
1501                                 /*
1502                                  * When memory is low, don't bother to try to migrate
1503                                  * other pages, just exit.
1504                                  */
1505                                 if (is_thp) {
1506                                         if (!try_split_thp(page, &page2, from)) {
1507                                                 nr_thp_split++;
1508                                                 goto retry;
1509                                         }
1510
1511                                         nr_thp_failed++;
1512                                         nr_failed += nr_subpages;
1513                                         goto out;
1514                                 }
1515                                 nr_failed++;
1516                                 goto out;
1517                         case -EAGAIN:
1518                                 if (is_thp) {
1519                                         thp_retry++;
1520                                         break;
1521                                 }
1522                                 retry++;
1523                                 break;
1524                         case MIGRATEPAGE_SUCCESS:
1525                                 if (is_thp) {
1526                                         nr_thp_succeeded++;
1527                                         nr_succeeded += nr_subpages;
1528                                         break;
1529                                 }
1530                                 nr_succeeded++;
1531                                 break;
1532                         default:
1533                                 /*
1534                                  * Permanent failure (-EBUSY, etc.):
1535                                  * unlike -EAGAIN case, the failed page is
1536                                  * removed from migration page list and not
1537                                  * retried in the next outer loop.
1538                                  */
1539                                 if (is_thp) {
1540                                         nr_thp_failed++;
1541                                         nr_failed += nr_subpages;
1542                                         break;
1543                                 }
1544                                 nr_failed++;
1545                                 break;
1546                         }
1547                 }
1548         }
1549         nr_failed += retry + thp_retry;
1550         nr_thp_failed += thp_retry;
1551         rc = nr_failed;
1552 out:
1553         /*
1554          * Put the pages that failed permanently back on the migration list;
1555          * they will be put back on the right list by the caller.
1556          */
1557         list_splice(&ret_pages, from);
1558
1559         count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1560         count_vm_events(PGMIGRATE_FAIL, nr_failed);
1561         count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
1562         count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
1563         count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
1564         trace_mm_migrate_pages(nr_succeeded, nr_failed, nr_thp_succeeded,
1565                                nr_thp_failed, nr_thp_split, mode, reason);
1566
1567         if (!swapwrite)
1568                 current->flags &= ~PF_SWAPWRITE;
1569
1570         return rc;
1571 }
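/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * migrate_pages(), mirroring the pattern of do_move_pages_to_node() further
 * down. The "pagelist" of isolated pages and the target node "nid" are
 * assumed to have been set up by the caller; per the contract above, pages
 * are put back only when the return value is nonzero.
 */
static int __maybe_unused example_migrate_list_to_node(struct list_head *pagelist,
						       int nid)
{
	struct migration_target_control mtc = {
		.nid = nid,
		/* __GFP_THISNODE pins allocations to the target node */
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};
	int err;

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}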
1572
1573 struct page *alloc_migration_target(struct page *page, unsigned long private)
1574 {
1575         struct migration_target_control *mtc;
1576         gfp_t gfp_mask;
1577         unsigned int order = 0;
1578         struct page *new_page = NULL;
1579         int nid;
1580         int zidx;
1581
1582         mtc = (struct migration_target_control *)private;
1583         gfp_mask = mtc->gfp_mask;
1584         nid = mtc->nid;
1585         if (nid == NUMA_NO_NODE)
1586                 nid = page_to_nid(page);
1587
1588         if (PageHuge(page)) {
1589                 struct hstate *h = page_hstate(compound_head(page));
1590
1591                 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
1592                 return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
1593         }
1594
1595         if (PageTransHuge(page)) {
1596                 /*
1597                  * clear __GFP_RECLAIM to make the migration callback
1598                  * consistent with regular THP allocations.
1599                  */
1600                 gfp_mask &= ~__GFP_RECLAIM;
1601                 gfp_mask |= GFP_TRANSHUGE;
1602                 order = HPAGE_PMD_ORDER;
1603         }
1604         zidx = zone_idx(page_zone(page));
1605         if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
1606                 gfp_mask |= __GFP_HIGHMEM;
1607
1608         new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
1609
1610         if (new_page && PageTransHuge(new_page))
1611                 prep_transhuge_page(new_page);
1612
1613         return new_page;
1614 }
1615
1616 #ifdef CONFIG_NUMA
1617
1618 static int store_status(int __user *status, int start, int value, int nr)
1619 {
1620         while (nr-- > 0) {
1621                 if (put_user(value, status + start))
1622                         return -EFAULT;
1623                 start++;
1624         }
1625
1626         return 0;
1627 }
1628
1629 static int do_move_pages_to_node(struct mm_struct *mm,
1630                 struct list_head *pagelist, int node)
1631 {
1632         int err;
1633         struct migration_target_control mtc = {
1634                 .nid = node,
1635                 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1636         };
1637
1638         err = migrate_pages(pagelist, alloc_migration_target, NULL,
1639                         (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
1640         if (err)
1641                 putback_movable_pages(pagelist);
1642         return err;
1643 }
1644
1645 /*
1646  * Resolves the given address to a struct page, isolates it from the LRU and
1647  * puts it on the given pagelist.
1648  * Returns:
1649  *     errno - if the page cannot be found/isolated
1650  *     0 - when it doesn't have to be migrated because it is already on the
1651  *         target node
1652  *     1 - when it has been queued
1653  */
1654 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1655                 int node, struct list_head *pagelist, bool migrate_all)
1656 {
1657         struct vm_area_struct *vma;
1658         struct page *page;
1659         unsigned int follflags;
1660         int err;
1661
1662         mmap_read_lock(mm);
1663         err = -EFAULT;
1664         vma = find_vma(mm, addr);
1665         if (!vma || addr < vma->vm_start || !vma_migratable(vma))
1666                 goto out;
1667
1668         /* FOLL_DUMP to ignore special (like zero) pages */
1669         follflags = FOLL_GET | FOLL_DUMP;
1670         page = follow_page(vma, addr, follflags);
1671
1672         err = PTR_ERR(page);
1673         if (IS_ERR(page))
1674                 goto out;
1675
1676         err = -ENOENT;
1677         if (!page)
1678                 goto out;
1679
1680         err = 0;
1681         if (page_to_nid(page) == node)
1682                 goto out_putpage;
1683
1684         err = -EACCES;
1685         if (page_mapcount(page) > 1 && !migrate_all)
1686                 goto out_putpage;
1687
1688         if (PageHuge(page)) {
1689                 if (PageHead(page)) {
1690                         isolate_huge_page(page, pagelist);
1691                         err = 1;
1692                 }
1693         } else {
1694                 struct page *head;
1695
1696                 head = compound_head(page);
1697                 err = isolate_lru_page(head);
1698                 if (err)
1699                         goto out_putpage;
1700
1701                 err = 1;
1702                 list_add_tail(&head->lru, pagelist);
1703                 mod_node_page_state(page_pgdat(head),
1704                         NR_ISOLATED_ANON + page_is_file_lru(head),
1705                         thp_nr_pages(head));
1706         }
1707 out_putpage:
1708         /*
1709          * Either remove the extra refcount taken by
1710          * isolate_lru_page() or drop the page's refcount if it
1711          * was not isolated.
1712          */
1713         put_page(page);
1714 out:
1715         mmap_read_unlock(mm);
1716         return err;
1717 }
1718
1719 static int move_pages_and_store_status(struct mm_struct *mm, int node,
1720                 struct list_head *pagelist, int __user *status,
1721                 int start, int i, unsigned long nr_pages)
1722 {
1723         int err;
1724
1725         if (list_empty(pagelist))
1726                 return 0;
1727
1728         err = do_move_pages_to_node(mm, pagelist, node);
1729         if (err) {
1730                 /*
1731                  * A positive err means the number of pages that
1732                  * failed to migrate.  Since we are going to
1733                  * abort and return the number of non-migrated
1734                  * pages, we need to include the rest of the
1735                  * nr_pages that have not been attempted as
1736                  * well.
1737                  */
1738                 if (err > 0)
1739                         err += nr_pages - i - 1;
1740                 return err;
1741         }
1742         return store_status(status, start, node, i - start);
1743 }
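/*
 * Worked example for the accounting above (illustrative numbers): with
 * nr_pages == 8, suppose the batch is flushed after handling index i == 5
 * and do_move_pages_to_node() reports err == 2 failed pages. Indexes 6 and
 * 7 were never attempted, so the function returns 2 + (8 - 5 - 1) == 4
 * non-migrated pages.
 */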
1744
1745 /*
1746  * Migrate an array of page addresses to an array of nodes and fill in
1747  * the corresponding array of status.
1748  */
1749 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1750                          unsigned long nr_pages,
1751                          const void __user * __user *pages,
1752                          const int __user *nodes,
1753                          int __user *status, int flags)
1754 {
1755         int current_node = NUMA_NO_NODE;
1756         LIST_HEAD(pagelist);
1757         int start, i;
1758         int err = 0, err1;
1759
1760         migrate_prep();
1761
1762         for (i = start = 0; i < nr_pages; i++) {
1763                 const void __user *p;
1764                 unsigned long addr;
1765                 int node;
1766
1767                 err = -EFAULT;
1768                 if (get_user(p, pages + i))
1769                         goto out_flush;
1770                 if (get_user(node, nodes + i))
1771                         goto out_flush;
1772                 addr = (unsigned long)untagged_addr(p);
1773
1774                 err = -ENODEV;
1775                 if (node < 0 || node >= MAX_NUMNODES)
1776                         goto out_flush;
1777                 if (!node_state(node, N_MEMORY))
1778                         goto out_flush;
1779
1780                 err = -EACCES;
1781                 if (!node_isset(node, task_nodes))
1782                         goto out_flush;
1783
1784                 if (current_node == NUMA_NO_NODE) {
1785                         current_node = node;
1786                         start = i;
1787                 } else if (node != current_node) {
1788                         err = move_pages_and_store_status(mm, current_node,
1789                                         &pagelist, status, start, i, nr_pages);
1790                         if (err)
1791                                 goto out;
1792                         start = i;
1793                         current_node = node;
1794                 }
1795
1796                 /*
1797                  * Errors in the page lookup or isolation are not fatal and we simply
1798                  * report them via status.
1799                  */
1800                 err = add_page_for_migration(mm, addr, current_node,
1801                                 &pagelist, flags & MPOL_MF_MOVE_ALL);
1802
1803                 if (err > 0) {
1804                         /* The page is successfully queued for migration */
1805                         continue;
1806                 }
1807
1808                 /*
1809                  * If the page is already on the target node (!err), store the
1810                  * node, otherwise, store the err.
1811                  */
1812                 err = store_status(status, i, err ? : current_node, 1);
1813                 if (err)
1814                         goto out_flush;
1815
1816                 err = move_pages_and_store_status(mm, current_node, &pagelist,
1817                                 status, start, i, nr_pages);
1818                 if (err)
1819                         goto out;
1820                 current_node = NUMA_NO_NODE;
1821         }
1822 out_flush:
1823         /* Make sure we do not overwrite the existing error */
1824         err1 = move_pages_and_store_status(mm, current_node, &pagelist,
1825                                 status, start, i, nr_pages);
1826         if (err >= 0)
1827                 err = err1;
1828 out:
1829         return err;
1830 }
1831
1832 /*
1833  * Determine the nodes of an array of pages and store them in an array of status.
1834  */
1835 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1836                                 const void __user **pages, int *status)
1837 {
1838         unsigned long i;
1839
1840         mmap_read_lock(mm);
1841
1842         for (i = 0; i < nr_pages; i++) {
1843                 unsigned long addr = (unsigned long)(*pages);
1844                 struct vm_area_struct *vma;
1845                 struct page *page;
1846                 int err = -EFAULT;
1847
1848                 vma = find_vma(mm, addr);
1849                 if (!vma || addr < vma->vm_start)
1850                         goto set_status;
1851
1852                 /* FOLL_DUMP to ignore special (like zero) pages */
1853                 page = follow_page(vma, addr, FOLL_DUMP);
1854
1855                 err = PTR_ERR(page);
1856                 if (IS_ERR(page))
1857                         goto set_status;
1858
1859                 err = page ? page_to_nid(page) : -ENOENT;
1860 set_status:
1861                 *status = err;
1862
1863                 pages++;
1864                 status++;
1865         }
1866
1867         mmap_read_unlock(mm);
1868 }
1869
1870 /*
1871  * Determine the nodes of a user array of pages and store them in
1872  * a user array of status.
1873  */
1874 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1875                          const void __user * __user *pages,
1876                          int __user *status)
1877 {
1878 #define DO_PAGES_STAT_CHUNK_NR 16
1879         const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1880         int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1881
1882         while (nr_pages) {
1883                 unsigned long chunk_nr;
1884
1885                 chunk_nr = nr_pages;
1886                 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1887                         chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1888
1889                 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1890                         break;
1891
1892                 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1893
1894                 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1895                         break;
1896
1897                 pages += chunk_nr;
1898                 status += chunk_nr;
1899                 nr_pages -= chunk_nr;
1900         }
1901         return nr_pages ? -EFAULT : 0;
1902 }
1903
1904 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
1905 {
1906         struct task_struct *task;
1907         struct mm_struct *mm;
1908
1909         /*
1910          * There is no need to check if the current process has the right to
1911          * modify the specified process when they are the same.
1912          */
1913         if (!pid) {
1914                 mmget(current->mm);
1915                 *mem_nodes = cpuset_mems_allowed(current);
1916                 return current->mm;
1917         }
1918
1919         /* Find the mm_struct */
1920         rcu_read_lock();
1921         task = find_task_by_vpid(pid);
1922         if (!task) {
1923                 rcu_read_unlock();
1924                 return ERR_PTR(-ESRCH);
1925         }
1926         get_task_struct(task);
1927
1928         /*
1929          * Check if this process has the right to modify the specified
1930          * process. Use the regular "ptrace_may_access()" checks.
1931          */
1932         if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1933                 rcu_read_unlock();
1934                 mm = ERR_PTR(-EPERM);
1935                 goto out;
1936         }
1937         rcu_read_unlock();
1938
1939         mm = ERR_PTR(security_task_movememory(task));
1940         if (IS_ERR(mm))
1941                 goto out;
1942         *mem_nodes = cpuset_mems_allowed(task);
1943         mm = get_task_mm(task);
1944 out:
1945         put_task_struct(task);
1946         if (!mm)
1947                 mm = ERR_PTR(-EINVAL);
1948         return mm;
1949 }
1950
1951 /*
1952  * Move a list of pages in the address space of the currently executing
1953  * process.
1954  */
1955 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
1956                              const void __user * __user *pages,
1957                              const int __user *nodes,
1958                              int __user *status, int flags)
1959 {
1960         struct mm_struct *mm;
1961         int err;
1962         nodemask_t task_nodes;
1963
1964         /* Check flags */
1965         if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1966                 return -EINVAL;
1967
1968         if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1969                 return -EPERM;
1970
1971         mm = find_mm_struct(pid, &task_nodes);
1972         if (IS_ERR(mm))
1973                 return PTR_ERR(mm);
1974
1975         if (nodes)
1976                 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1977                                     nodes, status, flags);
1978         else
1979                 err = do_pages_stat(mm, nr_pages, pages, status);
1980
1981         mmput(mm);
1982         return err;
1983 }
1984
1985 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1986                 const void __user * __user *, pages,
1987                 const int __user *, nodes,
1988                 int __user *, status, int, flags)
1989 {
1990         return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1991 }
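/*
 * Illustrative userspace sketch, not part of the original file: calling the
 * syscall defined above through the libnuma wrapper declared in <numaif.h>.
 * A pid of 0 targets the calling process, as handled by find_mm_struct()
 * above.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		void *pages[1] = { malloc(4096) };
 *		int nodes[1] = { 0 };	// ask for a move to node 0
 *		int status[1];
 *
 *		((char *)pages[0])[0] = 1;	// fault the page in first
 *		if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) != 0)
 *			perror("move_pages");
 *		else
 *			printf("page is now on node %d\n", status[0]);
 *		return 0;
 *	}
 */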
1992
1993 #ifdef CONFIG_COMPAT
1994 COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
1995                        compat_uptr_t __user *, pages32,
1996                        const int __user *, nodes,
1997                        int __user *, status,
1998                        int, flags)
1999 {
2000         const void __user * __user *pages;
2001         int i;
2002
2003         pages = compat_alloc_user_space(nr_pages * sizeof(void *));
2004         for (i = 0; i < nr_pages; i++) {
2005                 compat_uptr_t p;
2006
2007                 if (get_user(p, pages32 + i) ||
2008                         put_user(compat_ptr(p), pages + i))
2009                         return -EFAULT;
2010         }
2011         return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2012 }
2013 #endif /* CONFIG_COMPAT */
2014
2015 #ifdef CONFIG_NUMA_BALANCING
2016 /*
2017  * Returns true if this is a safe migration target node for misplaced NUMA
2018  * pages. Currently it only checks the watermarks, which is crude.
2019  */
2020 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2021                                    unsigned long nr_migrate_pages)
2022 {
2023         int z;
2024
2025         for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2026                 struct zone *zone = pgdat->node_zones + z;
2027
2028                 if (!populated_zone(zone))
2029                         continue;
2030
2031                 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2032                 if (!zone_watermark_ok(zone, 0,
2033                                        high_wmark_pages(zone) +
2034                                        nr_migrate_pages,
2035                                        ZONE_MOVABLE, 0))
2036                         continue;
2037                 return true;
2038         }
2039         return false;
2040 }
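/*
 * Worked example for the check above (illustrative numbers, ignoring lowmem
 * reserves): with high_wmark_pages(zone) == 1024 and nr_migrate_pages ==
 * 256, the zone must have at least 1280 free pages before the node counts
 * as a safe target, so completing the migration cannot by itself push the
 * zone below its high watermark.
 */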
2041
2042 static struct page *alloc_misplaced_dst_page(struct page *page,
2043                                            unsigned long data)
2044 {
2045         int nid = (int) data;
2046         struct page *newpage;
2047
2048         newpage = __alloc_pages_node(nid,
2049                                          (GFP_HIGHUSER_MOVABLE |
2050                                           __GFP_THISNODE | __GFP_NOMEMALLOC |
2051                                           __GFP_NORETRY | __GFP_NOWARN) &
2052                                          ~__GFP_RECLAIM, 0);
2053
2054         return newpage;
2055 }
2056
2057 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2058 {
2059         int page_lru;
2060
2061         VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
2062
2063         /* Avoid migrating to a node that is nearly full */
2064         if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
2065                 return 0;
2066
2067         if (isolate_lru_page(page))
2068                 return 0;
2069
2070         /*
2071          * migrate_misplaced_transhuge_page() skips page migration's usual
2072          * check on page_count(), so we must do it here, now that the page
2073          * has been isolated: a GUP pin, or any other pin, prevents migration.
2074          * The expected page count is 3: 1 for the page's mapcount, 1 for the
2075          * caller's pin, and 1 for the reference taken by isolate_lru_page().
2076          */
2077         if (PageTransHuge(page) && page_count(page) != 3) {
2078                 putback_lru_page(page);
2079                 return 0;
2080         }
2081
2082         page_lru = page_is_file_lru(page);
2083         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
2084                                 thp_nr_pages(page));
2085
2086         /*
2087          * Isolating the page has taken another reference, so the
2088          * caller's reference can be safely dropped without the page
2089          * disappearing underneath us during migration.
2090          */
2091         put_page(page);
2092         return 1;
2093 }
2094
2095 bool pmd_trans_migrating(pmd_t pmd)
2096 {
2097         struct page *page = pmd_page(pmd);
2098         return PageLocked(page);
2099 }
2100
2101 static inline bool is_shared_exec_page(struct vm_area_struct *vma,
2102                                        struct page *page)
2103 {
2104         if (page_mapcount(page) != 1 &&
2105             (page_is_file_lru(page) || vma_is_shmem(vma)) &&
2106             (vma->vm_flags & VM_EXEC))
2107                 return true;
2108
2109         return false;
2110 }
2111
2112 /*
2113  * Attempt to migrate a misplaced page to the specified destination
2114  * node. The caller is expected to have an elevated reference count on
2115  * the page, which will be dropped by this function before returning.
2116  */
2117 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2118                            int node)
2119 {
2120         pg_data_t *pgdat = NODE_DATA(node);
2121         int isolated;
2122         int nr_remaining;
2123         LIST_HEAD(migratepages);
2124
2125         /*
2126          * Don't migrate file pages that are mapped in multiple processes
2127          * with execute permissions as they are probably shared libraries.
2128          */
2129         if (is_shared_exec_page(vma, page))
2130                 goto out;
2131
2132         /*
2133          * Also do not migrate dirty pages, as not all filesystems can move
2134          * dirty pages in MIGRATE_ASYNC mode, which would be a waste of cycles.
2135          */
2136         if (page_is_file_lru(page) && PageDirty(page))
2137                 goto out;
2138
2139         isolated = numamigrate_isolate_page(pgdat, page);
2140         if (!isolated)
2141                 goto out;
2142
2143         list_add(&page->lru, &migratepages);
2144         nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
2145                                      NULL, node, MIGRATE_ASYNC,
2146                                      MR_NUMA_MISPLACED);
2147         if (nr_remaining) {
2148                 if (!list_empty(&migratepages)) {
2149                         list_del(&page->lru);
2150                         dec_node_page_state(page, NR_ISOLATED_ANON +
2151                                         page_is_file_lru(page));
2152                         putback_lru_page(page);
2153                 }
2154                 isolated = 0;
2155         } else
2156                 count_vm_numa_event(NUMA_PAGE_MIGRATE);
2157         BUG_ON(!list_empty(&migratepages));
2158         return isolated;
2159
2160 out:
2161         put_page(page);
2162         return 0;
2163 }
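/*
 * Illustrative sketch, not part of the original file: roughly how the NUMA
 * hinting fault path in mm/memory.c drives the function above. The
 * example_* name is hypothetical and target_nid would come from the memory
 * policy code.
 */
static int __maybe_unused example_numa_hint_fault(struct page *page,
		struct vm_area_struct *vma, int target_nid)
{
	/* The fault handler's page reference is consumed in all cases. */
	if (migrate_misplaced_page(page, vma, target_nid))
		return target_nid;	/* account the fault on the new node */
	return NUMA_NO_NODE;		/* migration failed or was refused */
}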
2164 #endif /* CONFIG_NUMA_BALANCING */
2165
2166 #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2167 /*
2168  * Migrates a THP to a given target node. page must be locked and is unlocked
2169  * before returning.
2170  */
2171 int migrate_misplaced_transhuge_page(struct mm_struct *mm,
2172                                 struct vm_area_struct *vma,
2173                                 pmd_t *pmd, pmd_t entry,
2174                                 unsigned long address,
2175                                 struct page *page, int node)
2176 {
2177         spinlock_t *ptl;
2178         pg_data_t *pgdat = NODE_DATA(node);
2179         int isolated = 0;
2180         struct page *new_page = NULL;
2181         int page_lru = page_is_file_lru(page);
2182         unsigned long start = address & HPAGE_PMD_MASK;
2183
2184         if (is_shared_exec_page(vma, page))
2185                 goto out;
2186
2187         new_page = alloc_pages_node(node,
2188                 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
2189                 HPAGE_PMD_ORDER);
2190         if (!new_page)
2191                 goto out_fail;
2192         prep_transhuge_page(new_page);
2193
2194         isolated = numamigrate_isolate_page(pgdat, page);
2195         if (!isolated) {
2196                 put_page(new_page);
2197                 goto out_fail;
2198         }
2199
2200         /* Prepare a page as a migration target */
2201         __SetPageLocked(new_page);
2202         if (PageSwapBacked(page))
2203                 __SetPageSwapBacked(new_page);
2204
2205         /* anon mapping, we can simply copy page->mapping to the new page */
2206         new_page->mapping = page->mapping;
2207         new_page->index = page->index;
2208         /* flush the cache before copying using the kernel virtual address */
2209         flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
2210         migrate_page_copy(new_page, page);
2211         WARN_ON(PageLRU(new_page));
2212
2213         /* Recheck the target PMD */
2214         ptl = pmd_lock(mm, pmd);
2215         if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
2216                 spin_unlock(ptl);
2217
2218                 /* Reverse changes made by migrate_page_copy() */
2219                 if (TestClearPageActive(new_page))
2220                         SetPageActive(page);
2221                 if (TestClearPageUnevictable(new_page))
2222                         SetPageUnevictable(page);
2223
2224                 unlock_page(new_page);
2225                 put_page(new_page);             /* Free it */
2226
2227                 /* Retake the caller's reference and put back on the LRU */
2228                 get_page(page);
2229                 putback_lru_page(page);
2230                 mod_node_page_state(page_pgdat(page),
2231                          NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
2232
2233                 goto out_unlock;
2234         }
2235
2236         entry = mk_huge_pmd(new_page, vma->vm_page_prot);
2237         entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2238
2239         /*
2240          * Overwrite the old entry under pagetable lock and establish
2241          * the new PTE. Any parallel GUP will either observe the old
2242          * page blocking on the page lock, block on the page table
2243          * lock or observe the new page. The SetPageUptodate on the
2244          * new page and page_add_new_anon_rmap guarantee the copy is
2245          * visible before the pagetable update.
2246          */
2247         page_add_anon_rmap(new_page, vma, start, true);
2248         /*
2249          * At this point the pmd is numa/protnone (i.e. non present) and the TLB
2250          * has already been flushed globally.  So no TLB can be currently
2251          * caching this non present pmd mapping.  There's no need to clear the
2252          * pmd before doing set_pmd_at(), nor to flush the TLB after
2253          * set_pmd_at().  Clearing the pmd here would introduce a race
2254          * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
2255          * mmap_lock for reading.  If the pmd is set to NULL at any given time,
2256          * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
2257          * pmd.
2258          */
2259         set_pmd_at(mm, start, pmd, entry);
2260         update_mmu_cache_pmd(vma, address, &entry);
2261
2262         page_ref_unfreeze(page, 2);
2263         mlock_migrate_page(new_page, page);
2264         page_remove_rmap(page, true);
2265         set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
2266
2267         spin_unlock(ptl);
2268
2269         /* Take an "isolate" reference and put new page on the LRU. */
2270         get_page(new_page);
2271         putback_lru_page(new_page);
2272
2273         unlock_page(new_page);
2274         unlock_page(page);
2275         put_page(page);                 /* Drop the rmap reference */
2276         put_page(page);                 /* Drop the LRU isolation reference */
2277
2278         count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
2279         count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
2280
2281         mod_node_page_state(page_pgdat(page),
2282                         NR_ISOLATED_ANON + page_lru,
2283                         -HPAGE_PMD_NR);
2284         return isolated;
2285
2286 out_fail:
2287         count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
2288         ptl = pmd_lock(mm, pmd);
2289         if (pmd_same(*pmd, entry)) {
2290                 entry = pmd_modify(entry, vma->vm_page_prot);
2291                 set_pmd_at(mm, start, pmd, entry);
2292                 update_mmu_cache_pmd(vma, address, &entry);
2293         }
2294         spin_unlock(ptl);
2295
2296 out_unlock:
2297         unlock_page(page);
2298 out:
2299         put_page(page);
2300         return 0;
2301 }
2302 #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
2303
2304 #endif /* CONFIG_NUMA */
2305
2306 #ifdef CONFIG_DEVICE_PRIVATE
2307 static int migrate_vma_collect_hole(unsigned long start,
2308                                     unsigned long end,
2309                                     __always_unused int depth,
2310                                     struct mm_walk *walk)
2311 {
2312         struct migrate_vma *migrate = walk->private;
2313         unsigned long addr;
2314
2315         /* Only allow populating anonymous memory. */
2316         if (!vma_is_anonymous(walk->vma)) {
2317                 for (addr = start; addr < end; addr += PAGE_SIZE) {
2318                         migrate->src[migrate->npages] = 0;
2319                         migrate->dst[migrate->npages] = 0;
2320                         migrate->npages++;
2321                 }
2322                 return 0;
2323         }
2324
2325         for (addr = start; addr < end; addr += PAGE_SIZE) {
2326                 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2327                 migrate->dst[migrate->npages] = 0;
2328                 migrate->npages++;
2329                 migrate->cpages++;
2330         }
2331
2332         return 0;
2333 }
2334
2335 static int migrate_vma_collect_skip(unsigned long start,
2336                                     unsigned long end,
2337                                     struct mm_walk *walk)
2338 {
2339         struct migrate_vma *migrate = walk->private;
2340         unsigned long addr;
2341
2342         for (addr = start; addr < end; addr += PAGE_SIZE) {
2343                 migrate->dst[migrate->npages] = 0;
2344                 migrate->src[migrate->npages++] = 0;
2345         }
2346
2347         return 0;
2348 }
2349
2350 static int migrate_vma_collect_pmd(pmd_t *pmdp,
2351                                    unsigned long start,
2352                                    unsigned long end,
2353                                    struct mm_walk *walk)
2354 {
2355         struct migrate_vma *migrate = walk->private;
2356         struct vm_area_struct *vma = walk->vma;
2357         struct mm_struct *mm = vma->vm_mm;
2358         unsigned long addr = start, unmapped = 0;
2359         spinlock_t *ptl;
2360         pte_t *ptep;
2361
2362 again:
2363         if (pmd_none(*pmdp))
2364                 return migrate_vma_collect_hole(start, end, -1, walk);
2365
2366         if (pmd_trans_huge(*pmdp)) {
2367                 struct page *page;
2368
2369                 ptl = pmd_lock(mm, pmdp);
2370                 if (unlikely(!pmd_trans_huge(*pmdp))) {
2371                         spin_unlock(ptl);
2372                         goto again;
2373                 }
2374
2375                 page = pmd_page(*pmdp);
2376                 if (is_huge_zero_page(page)) {
2377                         spin_unlock(ptl);
2378                         split_huge_pmd(vma, pmdp, addr);
2379                         if (pmd_trans_unstable(pmdp))
2380                                 return migrate_vma_collect_skip(start, end,
2381                                                                 walk);
2382                 } else {
2383                         int ret;
2384
2385                         get_page(page);
2386                         spin_unlock(ptl);
2387                         if (unlikely(!trylock_page(page)))
2388                                 return migrate_vma_collect_skip(start, end,
2389                                                                 walk);
2390                         ret = split_huge_page(page);
2391                         unlock_page(page);
2392                         put_page(page);
2393                         if (ret)
2394                                 return migrate_vma_collect_skip(start, end,
2395                                                                 walk);
2396                         if (pmd_none(*pmdp))
2397                                 return migrate_vma_collect_hole(start, end, -1,
2398                                                                 walk);
2399                 }
2400         }
2401
2402         if (unlikely(pmd_bad(*pmdp)))
2403                 return migrate_vma_collect_skip(start, end, walk);
2404
2405         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2406         arch_enter_lazy_mmu_mode();
2407
2408         for (; addr < end; addr += PAGE_SIZE, ptep++) {
2409                 unsigned long mpfn = 0, pfn;
2410                 struct page *page;
2411                 swp_entry_t entry;
2412                 pte_t pte;
2413
2414                 pte = *ptep;
2415
2416                 if (pte_none(pte)) {
2417                         if (vma_is_anonymous(vma)) {
2418                                 mpfn = MIGRATE_PFN_MIGRATE;
2419                                 migrate->cpages++;
2420                         }
2421                         goto next;
2422                 }
2423
2424                 if (!pte_present(pte)) {
2425                         /*
2426                          * Only care about unaddressable device page special
2427                          * page table entries. Other special swap entries are
2428                          * not migratable, and we ignore regular swapped pages.
2429                          */
2430                         entry = pte_to_swp_entry(pte);
2431                         if (!is_device_private_entry(entry))
2432                                 goto next;
2433
2434                         page = device_private_entry_to_page(entry);
2435                         if (!(migrate->flags &
2436                                 MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
2437                             page->pgmap->owner != migrate->pgmap_owner)
2438                                 goto next;
2439
2440                         mpfn = migrate_pfn(page_to_pfn(page)) |
2441                                         MIGRATE_PFN_MIGRATE;
2442                         if (is_write_device_private_entry(entry))
2443                                 mpfn |= MIGRATE_PFN_WRITE;
2444                 } else {
2445                         if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
2446                                 goto next;
2447                         pfn = pte_pfn(pte);
2448                         if (is_zero_pfn(pfn)) {
2449                                 mpfn = MIGRATE_PFN_MIGRATE;
2450                                 migrate->cpages++;
2451                                 goto next;
2452                         }
2453                         page = vm_normal_page(migrate->vma, addr, pte);
2454                         mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2455                         mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2456                 }
2457
2458                 /* FIXME support THP */
2459                 if (!page || !page->mapping || PageTransCompound(page)) {
2460                         mpfn = 0;
2461                         goto next;
2462                 }
2463
2464                 /*
2465                  * By getting a reference on the page we pin it and that blocks
2466                  * any kind of migration. A side effect is that it "freezes" the
2467                  * pte.
2468                  *
2469                  * We drop this reference after isolating the page from the lru
2470                  * for non-device pages (device pages are not on the lru and thus
2471                  * can't be dropped from it).
2472                  */
2473                 get_page(page);
2474                 migrate->cpages++;
2475
2476                 /*
2477                  * Optimize for the common case where page is only mapped once
2478                  * in one process. If we can lock the page, then we can safely
2479                  * set up a special migration page table entry now.
2480                  */
2481                 if (trylock_page(page)) {
2482                         pte_t swp_pte;
2483
2484                         mpfn |= MIGRATE_PFN_LOCKED;
2485                         ptep_get_and_clear(mm, addr, ptep);
2486
2487                         /* Set up a special migration page table entry */
2488                         entry = make_migration_entry(page, mpfn &
2489                                                      MIGRATE_PFN_WRITE);
2490                         swp_pte = swp_entry_to_pte(entry);
2491                         if (pte_present(pte)) {
2492                                 if (pte_soft_dirty(pte))
2493                                         swp_pte = pte_swp_mksoft_dirty(swp_pte);
2494                                 if (pte_uffd_wp(pte))
2495                                         swp_pte = pte_swp_mkuffd_wp(swp_pte);
2496                         } else {
2497                                 if (pte_swp_soft_dirty(pte))
2498                                         swp_pte = pte_swp_mksoft_dirty(swp_pte);
2499                                 if (pte_swp_uffd_wp(pte))
2500                                         swp_pte = pte_swp_mkuffd_wp(swp_pte);
2501                         }
2502                         set_pte_at(mm, addr, ptep, swp_pte);
2503
2504                         /*
2505                          * This is like a regular unmap: we remove the rmap and
2506                          * drop the page refcount. The page won't be freed, as
2507                          * we took a reference just above.
2508                          */
2509                         page_remove_rmap(page, false);
2510                         put_page(page);
2511
2512                         if (pte_present(pte))
2513                                 unmapped++;
2514                 }
2515
2516 next:
2517                 migrate->dst[migrate->npages] = 0;
2518                 migrate->src[migrate->npages++] = mpfn;
2519         }
2520         arch_leave_lazy_mmu_mode();
2521         pte_unmap_unlock(ptep - 1, ptl);
2522
2523         /* Only flush the TLB if we actually modified any entries */
2524         if (unmapped)
2525                 flush_tlb_range(walk->vma, start, end);
2526
2527         return 0;
2528 }
2529
2530 static const struct mm_walk_ops migrate_vma_walk_ops = {
2531         .pmd_entry              = migrate_vma_collect_pmd,
2532         .pte_hole               = migrate_vma_collect_hole,
2533 };
2534
2535 /*
2536  * migrate_vma_collect() - collect pages over a range of virtual addresses
2537  * @migrate: migrate struct containing all migration information
2538  *
2539  * This will walk the CPU page table. For each virtual address backed by a
2540  * valid page, it updates the src array and takes a reference on the page, in
2541  * order to pin the page until we lock it and unmap it.
2542  */
2543 static void migrate_vma_collect(struct migrate_vma *migrate)
2544 {
2545         struct mmu_notifier_range range;
2546
2547         /*
2548          * Note that the pgmap_owner is passed to the mmu notifier callback so
2549          * that the registered device driver can skip invalidating device
2550          * private page mappings that won't be migrated.
2551          */
2552         mmu_notifier_range_init_migrate(&range, 0, migrate->vma,
2553                 migrate->vma->vm_mm, migrate->start, migrate->end,
2554                 migrate->pgmap_owner);
2555         mmu_notifier_invalidate_range_start(&range);
2556
2557         walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
2558                         &migrate_vma_walk_ops, migrate);
2559
2560         mmu_notifier_invalidate_range_end(&range);
2561         migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2562 }
2563
2564 /*
2565  * migrate_vma_check_page() - check if page is pinned or not
2566  * @page: struct page to check
2567  *
2568  * Pinned pages cannot be migrated. This is the same test as in
2569  * migrate_page_move_mapping(), except that here we allow migration of a
2570  * ZONE_DEVICE page.
2571  */
2572 static bool migrate_vma_check_page(struct page *page)
2573 {
2574         /*
2575          * One extra ref because caller holds an extra reference, either from
2576          * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2577          * a device page.
2578          */
2579         int extra = 1;
2580
2581         /*
2582          * FIXME support THP (transparent huge page), it is bit more complex to
2583          * check them than regular pages, because they can be mapped with a pmd
2584          * or with a pte (split pte mapping).
2585          */
2586         if (PageCompound(page))
2587                 return false;
2588
2589         /* Pages from ZONE_DEVICE have one extra reference */
2590         if (is_zone_device_page(page)) {
2591                 /*
2592                  * Private pages can never be pinned as they have no valid pte
2593                  * and GUP will fail for them. Yet if there is a pending
2594                  * migration, a thread might try to wait on the pte migration
2595                  * entry and will bump the page reference count. Sadly there is
2596                  * no way to differentiate a regular pin from a migration wait,
2597                  * so to avoid two racing threads entering an infinite loop when
2598                  * migrating back to the CPU (one stopping migration because the
2599                  * other is waiting on the pte migration entry), we always
2600                  * return true here.
2601                  *
2602                  * FIXME: rework migration_entry_wait() so it needs no page ref.
2603                  */
2604                 return is_device_private_page(page);
2605         }
2606
2607         /* For file-backed pages */
2608         if (page_mapping(page))
2609                 extra += 1 + page_has_private(page);
2610
2611         if ((page_count(page) - extra) > page_mapcount(page))
2612                 return false;
2613
2614         return true;
2615 }
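/*
 * Worked example for the check above (illustrative): an anonymous page
 * mapped by one process has page_mapcount() == 1 and, counting the caller's
 * isolation reference, page_count() == 2, so (2 - 1) > 1 is false and the
 * page is considered migratable. One extra GUP pin makes page_count() == 3,
 * (3 - 1) > 1 becomes true, and migration is refused.
 */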
2616
2617 /*
2618  * migrate_vma_prepare() - lock pages and isolate them from the lru
2619  * @migrate: migrate struct containing all migration information
2620  *
2621  * This locks pages that have been collected by migrate_vma_collect(). Once each
2622  * page is locked it is isolated from the lru (for non-device pages). Finally,
2623  * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
2624  * migrated by concurrent kernel threads.
2625  */
2626 static void migrate_vma_prepare(struct migrate_vma *migrate)
2627 {
2628         const unsigned long npages = migrate->npages;
2629         const unsigned long start = migrate->start;
2630         unsigned long addr, i, restore = 0;
2631         bool allow_drain = true;
2632
2633         lru_add_drain();
2634
2635         for (i = 0; (i < npages) && migrate->cpages; i++) {
2636                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2637                 bool remap = true;
2638
2639                 if (!page)
2640                         continue;
2641
2642                 if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
2643                         /*
2644                          * Because we are migrating several pages there can be
2645                          * a deadlock between 2 concurrent migrations where each
2646                          * is waiting on the other's page lock.
2647                          *
2648                          * Make migrate_vma() a best-effort thing and back off
2649                          * for any page we cannot lock right away.
2650                          */
2651                         if (!trylock_page(page)) {
2652                                 migrate->src[i] = 0;
2653                                 migrate->cpages--;
2654                                 put_page(page);
2655                                 continue;
2656                         }
2657                         remap = false;
2658                         migrate->src[i] |= MIGRATE_PFN_LOCKED;
2659                 }
2660
2661                 /* ZONE_DEVICE pages are not on LRU */
2662                 if (!is_zone_device_page(page)) {
2663                         if (!PageLRU(page) && allow_drain) {
2664                                 /* Drain CPU's pagevec */
2665                                 lru_add_drain_all();
2666                                 allow_drain = false;
2667                         }
2668
2669                         if (isolate_lru_page(page)) {
2670                                 if (remap) {
2671                                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2672                                         migrate->cpages--;
2673                                         restore++;
2674                                 } else {
2675                                         migrate->src[i] = 0;
2676                                         unlock_page(page);
2677                                         migrate->cpages--;
2678                                         put_page(page);
2679                                 }
2680                                 continue;
2681                         }
2682
2683                         /* Drop the reference we took in collect */
2684                         put_page(page);
2685                 }
2686
2687                 if (!migrate_vma_check_page(page)) {
2688                         if (remap) {
2689                                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2690                                 migrate->cpages--;
2691                                 restore++;
2692
2693                                 if (!is_zone_device_page(page)) {
2694                                         get_page(page);
2695                                         putback_lru_page(page);
2696                                 }
2697                         } else {
2698                                 migrate->src[i] = 0;
2699                                 unlock_page(page);
2700                                 migrate->cpages--;
2701
2702                                 if (!is_zone_device_page(page))
2703                                         putback_lru_page(page);
2704                                 else
2705                                         put_page(page);
2706                         }
2707                 }
2708         }
2709
2710         for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
2711                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2712
2713                 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2714                         continue;
2715
2716                 remove_migration_pte(page, migrate->vma, addr, page);
2717
2718                 migrate->src[i] = 0;
2719                 unlock_page(page);
2720                 put_page(page);
2721                 restore--;
2722         }
2723 }
2724
2725 /*
2726  * migrate_vma_unmap() - replace page mapping with special migration pte entry
2727  * @migrate: migrate struct containing all migration information
2728  *
2729  * Replace page mapping (CPU page table pte) with a special migration pte entry
2730  * and check again if it has been pinned. Pinned pages are restored because we
2731  * cannot migrate them.
2732  *
2733  * This is the last step before we call the device driver callback to allocate
2734  * destination memory and copy contents of original page over to new page.
2735  */
2736 static void migrate_vma_unmap(struct migrate_vma *migrate)
2737 {
2738         int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK;
2739         const unsigned long npages = migrate->npages;
2740         const unsigned long start = migrate->start;
2741         unsigned long addr, i, restore = 0;
2742
2743         for (i = 0; i < npages; i++) {
2744                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2745
2746                 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2747                         continue;
2748
2749                 if (page_mapped(page)) {
2750                         try_to_unmap(page, flags);
2751                         if (page_mapped(page))
2752                                 goto restore;
2753                 }
2754
2755                 if (migrate_vma_check_page(page))
2756                         continue;
2757
2758 restore:
2759                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2760                 migrate->cpages--;
2761                 restore++;
2762         }
2763
2764         for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
2765                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2766
2767                 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2768                         continue;
2769
2770                 remove_migration_ptes(page, page, false);
2771
2772                 migrate->src[i] = 0;
2773                 unlock_page(page);
2774                 restore--;
2775
2776                 if (is_zone_device_page(page))
2777                         put_page(page);
2778                 else
2779                         putback_lru_page(page);
2780         }
2781 }
2782
2783 /**
2784  * migrate_vma_setup() - prepare to migrate a range of memory
2785  * @args: contains the vma, start, and pfns arrays for the migration
2786  *
2787  * Returns: negative errno on failures, 0 when 0 or more pages were migrated
2788  * without an error.
2789  *
2790  * Prepare to migrate a range of memory virtual address range by collecting all
2791  * the pages backing each virtual address in the range, saving them inside the
2792  * src array.  Then lock those pages and unmap them. Once the pages are locked
2793  * and unmapped, check whether each page is pinned or not.  Pages that aren't
2794  * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
2795  * corresponding src array entry.  Then restores any pages that are pinned, by
2796  * remapping and unlocking those pages.
2797  *
2798  * The caller should then allocate destination memory and copy source memory to
2799  * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
2800  * flag set).  Once these are allocated and copied, the caller must update each
2801  * corresponding entry in the dst array with the pfn value of the destination
2802  * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
2803  * (destination pages must have their struct pages locked, via lock_page()).
2804  *
2805  * Note that the caller does not have to migrate all the pages that are marked
2806  * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
2807  * device memory to system memory.  If the caller cannot migrate a device page
2808  * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
2809  * consequences for the userspace process, so it must be avoided if at all
2810  * possible.
2811  *
2812  * For empty entries inside the CPU page table (pte_none() or pmd_none() is
2813  * true) we do set the MIGRATE_PFN_MIGRATE flag inside the corresponding source
2814  * array entry, thus allowing the caller to allocate device memory for those
2815  * unbacked virtual addresses.  For this the caller simply has to allocate
2816  * device memory and properly set the destination entry like for regular
2817  * migration.  Note that this can still fail, and thus the device driver must
2818  * check if the migration was successful for those entries after calling
2819  * migrate_vma_pages(), just like for regular migration.
2820  *
2821  * After that, the caller must call migrate_vma_pages() to go over each entry
2822  * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
2823  * set.  If the corresponding entry in the dst array has the MIGRATE_PFN_VALID
2824  * flag set, then migrate_vma_pages() migrates struct page information from the
2825  * source struct page to the destination struct page.  If it fails to migrate
2826  * the struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in
2827  * the src array.
2828  *
2829  * At this point all successfully migrated pages have an entry in the src
2830  * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
2831  * array entry with MIGRATE_PFN_VALID flag set.
2832  *
2833  * Once migrate_vma_pages() returns the caller may inspect which pages were
2834  * successfully migrated, and which were not.  Successfully migrated pages will
2835  * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
2836  *
2837  * It is safe to update the device page table after migrate_vma_pages() because
2838  * both the destination and source pages are still locked, and the mmap_lock
2839  * is held in read mode (hence no one can unmap the range being migrated).
2840  *
2841  * Once the caller is done cleaning up things and updating its page table (if it
2842  * chose to do so, this is not an obligation) it finally calls
2843  * migrate_vma_finalize() to update the CPU page table to point to new pages
2844  * for successfully migrated pages or otherwise restore the CPU page table to
2845  * point to the original source pages.
2846  */
2847 int migrate_vma_setup(struct migrate_vma *args)
2848 {
2849         long nr_pages = (args->end - args->start) >> PAGE_SHIFT;
2850
2851         args->start &= PAGE_MASK;
2852         args->end &= PAGE_MASK;
2853         if (!args->vma || is_vm_hugetlb_page(args->vma) ||
2854             (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
2855                 return -EINVAL;
2856         if (nr_pages <= 0)
2857                 return -EINVAL;
2858         if (args->start < args->vma->vm_start ||
2859             args->start >= args->vma->vm_end)
2860                 return -EINVAL;
2861         if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
2862                 return -EINVAL;
2863         if (!args->src || !args->dst)
2864                 return -EINVAL;
2865
2866         memset(args->src, 0, sizeof(*args->src) * nr_pages);
2867         args->cpages = 0;
2868         args->npages = 0;
2869
2870         migrate_vma_collect(args);
2871
2872         if (args->cpages)
2873                 migrate_vma_prepare(args);
2874         if (args->cpages)
2875                 migrate_vma_unmap(args);
2876
2877         /*
2878          * At this point pages are locked and unmapped, and thus they have
2879          * stable content and can safely be copied to destination memory that
2880          * is allocated by the drivers.
2881          */
2882         return 0;
2883
2884 }
2885 EXPORT_SYMBOL(migrate_vma_setup);
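
/*
 * A minimal sketch of the caller-side flow described above: set up the
 * migration, allocate and fill destination entries, then call
 * migrate_vma_pages() and migrate_vma_finalize().  example_alloc_dst_page()
 * and the fixed 16-page batch are hypothetical placeholders for
 * driver-specific policy; a real device driver would typically DMA the data
 * instead of using copy_highpage().  Depending on the kernel version,
 * struct migrate_vma may also need .flags and .pgmap_owner filled in.
 */
#if 0	/* illustrative only, not compiled */
/* Caller must hold mmap_read_lock(vma->vm_mm) and pass a page-aligned range. */
static int example_migrate_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	unsigned long src[16] = { 0 };
	unsigned long dst[16] = { 0 };
	struct migrate_vma args = {
		.vma	= vma,
		.start	= start,
		.end	= end,
		.src	= src,
		.dst	= dst,
	};
	unsigned long i;
	int ret;

	if ((end - start) >> PAGE_SHIFT > 16)
		return -EINVAL;

	/* Collect, lock and unmap the source pages. */
	ret = migrate_vma_setup(&args);
	if (ret)
		return ret;

	for (i = 0; i < args.npages; i++) {
		struct page *spage = migrate_pfn_to_page(src[i]);
		struct page *dpage;

		/* Skip entries that cannot be migrated. */
		if (!(src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		/* Hypothetical driver hook: allocate destination memory. */
		dpage = example_alloc_dst_page();
		if (!dpage)
			continue;

		/* Destination pages must be locked, per the comment above. */
		lock_page(dpage);

		/* spage may be NULL for pte_none()/pmd_none() holes. */
		if (spage)
			copy_highpage(dpage, spage);

		dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	}

	/* Move struct page metadata, then fix up the CPU page table. */
	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}
#endif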
2886
2887 /*
2888  * This code closely matches the code in:
2889  *   __handle_mm_fault()
2890  *     handle_pte_fault()
2891  *       do_anonymous_page()
2892  * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
2893  * private page.
2894  */
2895 static void migrate_vma_insert_page(struct migrate_vma *migrate,
2896                                     unsigned long addr,
2897                                     struct page *page,
2898                                     unsigned long *src)
2899 {
2900         struct vm_area_struct *vma = migrate->vma;
2901         struct mm_struct *mm = vma->vm_mm;
2902         bool flush = false;
2903         spinlock_t *ptl;
2904         pte_t entry;
2905         pgd_t *pgdp;
2906         p4d_t *p4dp;
2907         pud_t *pudp;
2908         pmd_t *pmdp;
2909         pte_t *ptep;
2910
2911         /* Only allow populating anonymous memory */
2912         if (!vma_is_anonymous(vma))
2913                 goto abort;
2914
2915         pgdp = pgd_offset(mm, addr);
2916         p4dp = p4d_alloc(mm, pgdp, addr);
2917         if (!p4dp)
2918                 goto abort;
2919         pudp = pud_alloc(mm, p4dp, addr);
2920         if (!pudp)
2921                 goto abort;
2922         pmdp = pmd_alloc(mm, pudp, addr);
2923         if (!pmdp)
2924                 goto abort;
2925
2926         if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2927                 goto abort;
2928
2929         /*
2930          * Use pte_alloc() instead of pte_alloc_map().  We can't run
2931          * pte_offset_map() on pmds where a huge pmd might be created
2932          * from a different thread.
2933          *
2934          * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
2935          * parallel threads are excluded by other means.
2936          *
2937          * Here we only have mmap_read_lock(mm).
2938          */
2939         if (pte_alloc(mm, pmdp))
2940                 goto abort;
2941
2942         /* See the comment in pte_alloc_one_map() */
2943         if (unlikely(pmd_trans_unstable(pmdp)))
2944                 goto abort;
2945
2946         if (unlikely(anon_vma_prepare(vma)))
2947                 goto abort;
2948         if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
2949                 goto abort;
2950
2951         /*
2952          * The memory barrier inside __SetPageUptodate makes sure that
2953          * preceding stores to the page contents become visible before
2954          * the set_pte_at() write.
2955          */
2956         __SetPageUptodate(page);
2957
2958         if (is_zone_device_page(page)) {
2959                 if (is_device_private_page(page)) {
2960                         swp_entry_t swp_entry;
2961
2962                         swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
2963                         entry = swp_entry_to_pte(swp_entry);
2964                 }
2965         } else {
2966                 entry = mk_pte(page, vma->vm_page_prot);
2967                 if (vma->vm_flags & VM_WRITE)
2968                         entry = pte_mkwrite(pte_mkdirty(entry));
2969         }
2970
2971         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2972
2973         if (check_stable_address_space(mm))
2974                 goto unlock_abort;
2975
2976         if (pte_present(*ptep)) {
2977                 unsigned long pfn = pte_pfn(*ptep);
2978
2979                 if (!is_zero_pfn(pfn))
2980                         goto unlock_abort;
2981                 flush = true;
2982         } else if (!pte_none(*ptep))
2983                 goto unlock_abort;
2984
2985         /*
2986          * Check for userfaultfd but do not deliver the fault. Instead,
2987          * just back off.
2988          */
2989         if (userfaultfd_missing(vma))
2990                 goto unlock_abort;
2991
2992         inc_mm_counter(mm, MM_ANONPAGES);
2993         page_add_new_anon_rmap(page, vma, addr, false);
2994         if (!is_zone_device_page(page))
2995                 lru_cache_add_inactive_or_unevictable(page, vma);
2996         get_page(page);
2997
2998         if (flush) {
2999                 flush_cache_page(vma, addr, pte_pfn(*ptep));
3000                 ptep_clear_flush_notify(vma, addr, ptep);
3001                 set_pte_at_notify(mm, addr, ptep, entry);
3002                 update_mmu_cache(vma, addr, ptep);
3003         } else {
3004                 /* No need to invalidate - it was non-present before */
3005                 set_pte_at(mm, addr, ptep, entry);
3006                 update_mmu_cache(vma, addr, ptep);
3007         }
3008
3009         pte_unmap_unlock(ptep, ptl);
3010         *src = MIGRATE_PFN_MIGRATE;
3011         return;
3012
3013 unlock_abort:
3014         pte_unmap_unlock(ptep, ptl);
3015 abort:
3016         *src &= ~MIGRATE_PFN_MIGRATE;
3017 }
3018
3019 /**
3020  * migrate_vma_pages() - migrate meta-data from src page to dst page
3021  * @migrate: migrate struct containing all migration information
3022  *
3023  * This migrates struct page meta-data from source struct page to destination
3024  * struct page. This effectively finishes the migration from source page to the
3025  * destination page.
3026  */
3027 void migrate_vma_pages(struct migrate_vma *migrate)
3028 {
3029         const unsigned long npages = migrate->npages;
3030         const unsigned long start = migrate->start;
3031         struct mmu_notifier_range range;
3032         unsigned long addr, i;
3033         bool notified = false;
3034
3035         for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
3036                 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
3037                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
3038                 struct address_space *mapping;
3039                 int r;
3040
3041                 if (!newpage) {
3042                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
3043                         continue;
3044                 }
3045
3046                 if (!page) {
3047                         if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
3048                                 continue;
3049                         if (!notified) {
3050                                 notified = true;
3051
3052                                 mmu_notifier_range_init_migrate(&range, 0,
3053                                         migrate->vma, migrate->vma->vm_mm,
3054                                         addr, migrate->end,
3055                                         migrate->pgmap_owner);
3056                                 mmu_notifier_invalidate_range_start(&range);
3057                         }
3058                         migrate_vma_insert_page(migrate, addr, newpage,
3059                                                 &migrate->src[i]);
3060                         continue;
3061                 }
3062
3063                 mapping = page_mapping(page);
3064
3065                 if (is_zone_device_page(newpage)) {
3066                         if (is_device_private_page(newpage)) {
3067                                 /*
3068                                  * For now only support private anonymous when
3069                                  * migrating to un-addressable device memory.
3070                                  */
3071                                 if (mapping) {
3072                                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
3073                                         continue;
3074                                 }
3075                         } else {
3076                                 /*
3077                                  * Other types of ZONE_DEVICE page are not
3078                                  * supported.
3079                                  */
3080                                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
3081                                 continue;
3082                         }
3083                 }
3084
3085                 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
3086                 if (r != MIGRATEPAGE_SUCCESS)
3087                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
3088         }
3089
3090         /*
3091          * No need to double call mmu_notifier->invalidate_range() callback as
3092          * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
3093          * did already call it.
3094          */
3095         if (notified)
3096                 mmu_notifier_invalidate_range_only_end(&range);
3097 }
3098 EXPORT_SYMBOL(migrate_vma_pages);
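
/*
 * A short sketch of the step described above: after migrate_vma_pages() the
 * caller can check which src entries kept MIGRATE_PFN_MIGRATE to learn which
 * pages actually migrated, and may update its device page table before
 * calling migrate_vma_finalize(), since both pages are still locked and the
 * mmap_lock is held for read.  example_map_dst_page() is a hypothetical
 * stand-in for a driver's device page table update.
 */
#if 0	/* illustrative only, not compiled */
static void example_commit_migration(struct migrate_vma *args)
{
	unsigned long i;

	migrate_vma_pages(args);

	for (i = 0; i < args->npages; i++) {
		struct page *dpage = migrate_pfn_to_page(args->dst[i]);

		/* Entries that lost MIGRATE_PFN_MIGRATE did not migrate. */
		if (!dpage || !(args->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		/* Hypothetical: point the device page table at dpage. */
		example_map_dst_page(args, dpage);
	}

	/* Restore or replace the CPU ptes, unlock and release the pages. */
	migrate_vma_finalize(args);
}
#endif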
3099
3100 /**
3101  * migrate_vma_finalize() - restore CPU page table entry
3102  * @migrate: migrate struct containing all migration information
3103  *
3104  * This replaces the special migration pte entry with either a mapping to the
3105  * new page if migration was successful for that page, or to the original page
3106  * otherwise.
3107  *
3108  * This also unlocks the pages and puts them back on the LRU, or drops the
3109  * extra refcount for device pages.
3110  */
3111 void migrate_vma_finalize(struct migrate_vma *migrate)
3112 {
3113         const unsigned long npages = migrate->npages;
3114         unsigned long i;
3115
3116         for (i = 0; i < npages; i++) {
3117                 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
3118                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
3119
3120                 if (!page) {
3121                         if (newpage) {
3122                                 unlock_page(newpage);
3123                                 put_page(newpage);
3124                         }
3125                         continue;
3126                 }
3127
3128                 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
3129                         if (newpage) {
3130                                 unlock_page(newpage);
3131                                 put_page(newpage);
3132                         }
3133                         newpage = page;
3134                 }
3135
3136                 remove_migration_ptes(page, newpage, false);
3137                 unlock_page(page);
3138
3139                 if (is_zone_device_page(page))
3140                         put_page(page);
3141                 else
3142                         putback_lru_page(page);
3143
3144                 if (newpage != page) {
3145                         unlock_page(newpage);
3146                         if (is_zone_device_page(newpage))
3147                                 put_page(newpage);
3148                         else
3149                                 putback_lru_page(newpage);
3150                 }
3151         }
3152 }
3153 EXPORT_SYMBOL(migrate_vma_finalize);
3154 #endif /* CONFIG_DEVICE_PRIVATE */