diff --git a/mm/migrate.c b/mm/migrate.c
index d179657..34a842a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -193,7 +193,7 @@ void putback_movable_pages(struct list_head *l)
                        put_page(page);
                } else {
                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                       page_is_file_lru(page), -hpage_nr_pages(page));
+                                       page_is_file_lru(page), -thp_nr_pages(page));
                        putback_lru_page(page);
                }
        }
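
This and every later hunk that touches hpage_nr_pages() is the same mechanical rename. For reference, a minimal sketch of the renamed helper as it reads in include/linux/huge_mm.h around this series (THP-enabled build assumed; the !CONFIG_TRANSPARENT_HUGEPAGE stub simply returns 1):

static inline int thp_nr_pages(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	if (PageTransHuge(page))
		return HPAGE_PMD_NR;	/* e.g. 512 with 4KiB base pages on x86-64 */
	return 1;			/* ordinary base page */
}
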
@@ -386,7 +386,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
         */
        expected_count += is_device_private_page(page);
        if (mapping)
-               expected_count += hpage_nr_pages(page) + page_has_private(page);
+               expected_count += thp_nr_pages(page) + page_has_private(page);
 
        return expected_count;
 }
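
A worked example of the count this helper now produces (assuming x86-64 with 4KiB base pages, so HPAGE_PMD_NR == 512):

/*
 * Migrating a PMD-sized THP that sits in the page cache, with no
 * device-private backing and no buffer heads attached:
 *
 *   expected_count = 1    initial value (caller's reference)
 *                  + 0    is_device_private_page(page)
 *                  + 512  thp_nr_pages(page)
 *                  + 0    page_has_private(page)
 *                  = 513
 */
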
@@ -441,7 +441,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         */
        newpage->index = page->index;
        newpage->mapping = page->mapping;
-       page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
+       page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
        if (PageSwapBacked(page)) {
                __SetPageSwapBacked(newpage);
                if (PageSwapCache(page)) {
@@ -474,7 +474,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         * to one less reference.
         * We know this isn't the last reference.
         */
-       page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
+       page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
 
        xas_unlock(&xas);
        /* Leave irq disabled to prevent preemption while updating stats */
@@ -591,7 +591,7 @@ static void copy_huge_page(struct page *dst, struct page *src)
        } else {
                /* thp page */
                BUG_ON(!PageTransHuge(src));
-               nr_pages = hpage_nr_pages(src);
+               nr_pages = thp_nr_pages(src);
        }
 
        for (i = 0; i < nr_pages; i++) {
@@ -1213,7 +1213,7 @@ out:
                 */
                if (likely(!__PageMovable(page)))
                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                       page_is_file_lru(page), -hpage_nr_pages(page));
+                                       page_is_file_lru(page), -thp_nr_pages(page));
        }
 
        /*
@@ -1418,22 +1418,35 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
                enum migrate_mode mode, int reason)
 {
        int retry = 1;
+       int thp_retry = 1;
        int nr_failed = 0;
        int nr_succeeded = 0;
+       int nr_thp_succeeded = 0;
+       int nr_thp_failed = 0;
+       int nr_thp_split = 0;
        int pass = 0;
+       bool is_thp = false;
        struct page *page;
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
-       int rc;
+       int rc, nr_subpages;
 
        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;
 
-       for(pass = 0; pass < 10 && retry; pass++) {
+       for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
                retry = 0;
+               thp_retry = 0;
 
                list_for_each_entry_safe(page, page2, from, lru) {
 retry:
+                       /*
+                        * THP statistics are based on the source huge page.
+                        * Capture required information that might get lost
+                        * during migration.
+                        */
+                       is_thp = PageTransHuge(page);
+                       nr_subpages = thp_nr_pages(page);
                        cond_resched();
 
                        if (PageHuge(page))
@@ -1464,15 +1477,30 @@ retry:
                                        unlock_page(page);
                                        if (!rc) {
                                                list_safe_reset_next(page, page2, lru);
+                                               nr_thp_split++;
                                                goto retry;
                                        }
                                }
+                               if (is_thp) {
+                                       nr_thp_failed++;
+                                       nr_failed += nr_subpages;
+                                       goto out;
+                               }
                                nr_failed++;
                                goto out;
                        case -EAGAIN:
+                               if (is_thp) {
+                                       thp_retry++;
+                                       break;
+                               }
                                retry++;
                                break;
                        case MIGRATEPAGE_SUCCESS:
+                               if (is_thp) {
+                                       nr_thp_succeeded++;
+                                       nr_succeeded += nr_subpages;
+                                       break;
+                               }
                                nr_succeeded++;
                                break;
                        default:
@@ -1482,19 +1510,27 @@ retry:
                                 * removed from migration page list and not
                                 * retried in the next outer loop.
                                 */
+                               if (is_thp) {
+                                       nr_thp_failed++;
+                                       nr_failed += nr_subpages;
+                                       break;
+                               }
                                nr_failed++;
                                break;
                        }
                }
        }
-       nr_failed += retry;
+       nr_failed += retry + thp_retry;
+       nr_thp_failed += thp_retry;
        rc = nr_failed;
 out:
-       if (nr_succeeded)
-               count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
-       if (nr_failed)
-               count_vm_events(PGMIGRATE_FAIL, nr_failed);
-       trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
+       count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
+       count_vm_events(PGMIGRATE_FAIL, nr_failed);
+       count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
+       count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
+       count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
+       trace_mm_migrate_pages(nr_succeeded, nr_failed, nr_thp_succeeded,
+                              nr_thp_failed, nr_thp_split, mode, reason);
 
        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;
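
The three new events land in /proc/vmstat as thp_migration_success, thp_migration_fail and thp_migration_split, next to the existing pgmigrate_* counters (all now bumped unconditionally, since adding zero is harmless). A small, hypothetical userspace helper to watch them:

#include <stdio.h>
#include <string.h>

/* Print the page/THP migration counters from /proc/vmstat. */
int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "pgmigrate_", 10) ||
		    !strncmp(line, "thp_migration_", 14))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
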
@@ -1502,6 +1538,49 @@ out:
        return rc;
 }
 
+struct page *alloc_migration_target(struct page *page, unsigned long private)
+{
+       struct migration_target_control *mtc;
+       gfp_t gfp_mask;
+       unsigned int order = 0;
+       struct page *new_page = NULL;
+       int nid;
+       int zidx;
+
+       mtc = (struct migration_target_control *)private;
+       gfp_mask = mtc->gfp_mask;
+       nid = mtc->nid;
+       if (nid == NUMA_NO_NODE)
+               nid = page_to_nid(page);
+
+       if (PageHuge(page)) {
+               struct hstate *h = page_hstate(compound_head(page));
+
+               gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
+               return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
+       }
+
+       if (PageTransHuge(page)) {
+               /*
+                * clear __GFP_RECLAIM to make the migration callback
+                * consistent with regular THP allocations.
+                */
+               gfp_mask &= ~__GFP_RECLAIM;
+               gfp_mask |= GFP_TRANSHUGE;
+               order = HPAGE_PMD_ORDER;
+       }
+       zidx = zone_idx(page_zone(page));
+       if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
+               gfp_mask |= __GFP_HIGHMEM;
+
+       new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
+
+       if (new_page && PageTransHuge(new_page))
+               prep_transhuge_page(new_page);
+
+       return new_page;
+}
+
 #ifdef CONFIG_NUMA
 
 static int store_status(int __user *status, int start, int value, int nr)
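
alloc_migration_target() unpacks its private argument into the new control structure that this series adds to mm/internal.h, reproduced here for context:

/* From mm/internal.h (added by this series). */
struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};
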
@@ -1519,9 +1598,13 @@ static int do_move_pages_to_node(struct mm_struct *mm,
                struct list_head *pagelist, int node)
 {
        int err;
+       struct migration_target_control mtc = {
+               .nid = node,
+               .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
+       };
 
-       err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
-                       MIGRATE_SYNC, MR_SYSCALL);
+       err = migrate_pages(pagelist, alloc_migration_target, NULL,
+                       (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
        if (err)
                putback_movable_pages(pagelist);
        return err;
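
do_move_pages_to_node() sits under the move_pages(2) syscall, so the new calling convention can be exercised entirely from userspace. A minimal example using the libnuma wrapper (link with -lnuma; error handling trimmed):

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *page = aligned_alloc(psz, psz);
	int node = 0, status = -1;

	*(volatile char *)page = 1;	/* touch it so a page is allocated */

	/* pid 0 means the calling process */
	if (move_pages(0, 1, &page, &node, &status, MPOL_MF_MOVE))
		perror("move_pages");
	else
		printf("page now on node %d\n", status);

	free(page);
	return 0;
}
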
@@ -1587,7 +1670,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
                list_add_tail(&head->lru, pagelist);
                mod_node_page_state(page_pgdat(head),
                        NR_ISOLATED_ANON + page_is_file_lru(head),
-                       hpage_nr_pages(head));
+                       thp_nr_pages(head));
        }
 out_putpage:
        /*
@@ -1951,7 +2034,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 
        page_lru = page_is_file_lru(page);
        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
-                               hpage_nr_pages(page));
+                               thp_nr_pages(page));
 
        /*
         * Isolating the page has taken another reference, so the
@@ -2168,6 +2251,16 @@ static int migrate_vma_collect_hole(unsigned long start,
        struct migrate_vma *migrate = walk->private;
        unsigned long addr;
 
+       /* Only allow populating anonymous memory. */
+       if (!vma_is_anonymous(walk->vma)) {
+               for (addr = start; addr < end; addr += PAGE_SIZE) {
+                       migrate->src[migrate->npages] = 0;
+                       migrate->dst[migrate->npages] = 0;
+                       migrate->npages++;
+               }
+               return 0;
+       }
+
        for (addr = start; addr < end; addr += PAGE_SIZE) {
                migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
                migrate->dst[migrate->npages] = 0;
@@ -2260,8 +2353,10 @@ again:
                pte = *ptep;
 
                if (pte_none(pte)) {
-                       mpfn = MIGRATE_PFN_MIGRATE;
-                       migrate->cpages++;
+                       if (vma_is_anonymous(vma)) {
+                               mpfn = MIGRATE_PFN_MIGRATE;
+                               migrate->cpages++;
+                       }
                        goto next;
                }
 
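Both migrate_vma hunks above gate the "populate a hole" case on the same test; for reference, the helper is simply (as in include/linux/mm.h):

/* An anonymous VMA is one with no vm_operations. */
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}
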
@@ -2619,7 +2714,7 @@ restore:
 
 /**
  * migrate_vma_setup() - prepare to migrate a range of memory
- * @args: contains the vma, start, and and pfns arrays for the migration
+ * @args: contains the vma, start, and pfns arrays for the migration
  *
  * Returns: negative errno on failures, 0 when 0 or more pages were migrated
  * without an error.
@@ -2830,7 +2925,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
        inc_mm_counter(mm, MM_ANONPAGES);
        page_add_new_anon_rmap(page, vma, addr, false);
        if (!is_zone_device_page(page))
-               lru_cache_add_active_or_unevictable(page, vma);
+               lru_cache_add_inactive_or_unevictable(page, vma);
        get_page(page);
 
        if (flush) {
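
The last hunk picks up the mm/vmscan rename from the same merge window: freshly faulted anonymous pages are now queued for the inactive LRU so the anonymous workingset can be detected. A simplified sketch of the renamed helper, not the exact mm/swap.c body:

void lru_cache_add_inactive_or_unevictable(struct page *page,
					   struct vm_area_struct *vma)
{
	/* mlocked pages are destined for the unevictable list instead */
	if ((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED &&
	    !TestSetPageMlocked(page))
		count_vm_event(UNEVICTABLE_PGMLOCKED);

	lru_cache_add(page);	/* drains to the inactive LRU */
}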