diff --git a/mm/migrate.c b/mm/migrate.c
index 8d084e9..34a842a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -193,7 +193,7 @@ void putback_movable_pages(struct list_head *l)
                        put_page(page);
                } else {
                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                       page_is_file_lru(page), -hpage_nr_pages(page));
+                                       page_is_file_lru(page), -thp_nr_pages(page));
                        putback_lru_page(page);
                }
        }
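
For reference: the hpage_nr_pages() -> thp_nr_pages() conversion repeated throughout this file is purely mechanical; the two helpers are equivalent. A minimal sketch of the new helper, assuming it matches the definition in include/linux/huge_mm.h:

	static inline int thp_nr_pages(struct page *page)
	{
		/* Tail pages are never expected here. */
		VM_BUG_ON_PGFLAGS(PageTail(page), page);
		/* A THP head page spans HPAGE_PMD_NR base pages; */
		/* anything else counts as a single page. */
		if (PageHead(page))
			return HPAGE_PMD_NR;
		return 1;
	}
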
@@ -386,7 +386,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page)
         */
        expected_count += is_device_private_page(page);
        if (mapping)
-               expected_count += hpage_nr_pages(page) + page_has_private(page);
+               expected_count += thp_nr_pages(page) + page_has_private(page);
 
        return expected_count;
 }
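
As a worked example (assuming 4 KiB base pages, so HPAGE_PMD_NR == 512): for a PMD-sized THP in the page cache with private data attached, this computes 1 (base) + 512 (page cache references) + 1 (page_has_private) = 514 expected references.
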
@@ -441,7 +441,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         */
        newpage->index = page->index;
        newpage->mapping = page->mapping;
-       page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
+       page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
        if (PageSwapBacked(page)) {
                __SetPageSwapBacked(newpage);
                if (PageSwapCache(page)) {
@@ -474,7 +474,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         * to one less reference.
         * We know this isn't the last reference.
         */
-       page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
+       page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
 
        xas_unlock(&xas);
        /* Leave irq disabled to prevent preemption while updating stats */
@@ -591,7 +591,7 @@ static void copy_huge_page(struct page *dst, struct page *src)
        } else {
                /* thp page */
                BUG_ON(!PageTransHuge(src));
-               nr_pages = hpage_nr_pages(src);
+               nr_pages = thp_nr_pages(src);
        }
 
        for (i = 0; i < nr_pages; i++) {
@@ -1213,7 +1213,7 @@ out:
                 */
                if (likely(!__PageMovable(page)))
                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-                                       page_is_file_lru(page), -hpage_nr_pages(page));
+                                       page_is_file_lru(page), -thp_nr_pages(page));
        }
 
        /*
@@ -1446,7 +1446,7 @@ retry:
                         * during migration.
                         */
                        is_thp = PageTransHuge(page);
-                       nr_subpages = hpage_nr_pages(page);
+                       nr_subpages = thp_nr_pages(page);
                        cond_resched();
 
                        if (PageHuge(page))
@@ -1538,31 +1538,42 @@ out:
        return rc;
 }
 
-struct page *new_page_nodemask(struct page *page,
-                               int preferred_nid, nodemask_t *nodemask)
+struct page *alloc_migration_target(struct page *page, unsigned long private)
 {
-       gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
+       struct migration_target_control *mtc;
+       gfp_t gfp_mask;
        unsigned int order = 0;
        struct page *new_page = NULL;
+       int nid;
+       int zidx;
+
+       mtc = (struct migration_target_control *)private;
+       gfp_mask = mtc->gfp_mask;
+       nid = mtc->nid;
+       if (nid == NUMA_NO_NODE)
+               nid = page_to_nid(page);
 
        if (PageHuge(page)) {
                struct hstate *h = page_hstate(compound_head(page));
 
-               gfp_mask = htlb_alloc_mask(h);
-               return alloc_huge_page_nodemask(h, preferred_nid,
-                                               nodemask, gfp_mask);
+               gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
+               return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
        }
 
        if (PageTransHuge(page)) {
+               /*
+                * clear __GFP_RECLAIM to make the migration callback
+                * consistent with regular THP allocations.
+                */
+               gfp_mask &= ~__GFP_RECLAIM;
                gfp_mask |= GFP_TRANSHUGE;
                order = HPAGE_PMD_ORDER;
        }
-
-       if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
+       zidx = zone_idx(page_zone(page));
+       if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
                gfp_mask |= __GFP_HIGHMEM;
 
-       new_page = __alloc_pages_nodemask(gfp_mask, order,
-                               preferred_nid, nodemask);
+       new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
 
        if (new_page && PageTransHuge(new_page))
                prep_transhuge_page(new_page);
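
The private argument now carries a pointer to a migration_target_control, which bundles the target node, nodemask, and gfp mask that new_page_nodemask() used to take as separate parameters. Assuming the struct matches its definition in mm/internal.h, it looks roughly like:

	struct migration_target_control {
		int nid;		/* preferred node id */
		nodemask_t *nmask;
		gfp_t gfp_mask;
	};
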
@@ -1587,9 +1598,13 @@ static int do_move_pages_to_node(struct mm_struct *mm,
                struct list_head *pagelist, int node)
 {
        int err;
+       struct migration_target_control mtc = {
+               .nid = node,
+               .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
+       };
 
-       err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
-                       MIGRATE_SYNC, MR_SYSCALL);
+       err = migrate_pages(pagelist, alloc_migration_target, NULL,
+                       (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
        if (err)
                putback_movable_pages(pagelist);
        return err;
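
Callers with no node preference can pass NUMA_NO_NODE and let alloc_migration_target() fall back to the source page's node. A hypothetical caller following the same pattern (names and gfp choice illustrative only):

	/* Hypothetical: migrate without a target-node preference. */
	struct migration_target_control mtc = {
		.nid = NUMA_NO_NODE,
		.nmask = &node_states[N_MEMORY],
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	err = migrate_pages(&pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
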
@@ -1655,7 +1670,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
                list_add_tail(&head->lru, pagelist);
                mod_node_page_state(page_pgdat(head),
                        NR_ISOLATED_ANON + page_is_file_lru(head),
-                       hpage_nr_pages(head));
+                       thp_nr_pages(head));
        }
 out_putpage:
        /*
@@ -2019,7 +2034,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 
        page_lru = page_is_file_lru(page);
        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
-                               hpage_nr_pages(page));
+                               thp_nr_pages(page));
 
        /*
         * Isolating the page has taken another reference, so the