mm,thp: add read-only THP support for (non-shmem) FS
[linux-2.6-microblaze.git] mm/compaction.c
index 952dc2f..ce08b39 100644
@@ -969,7 +969,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                         * is safe to read and it's 0 for tail pages.
                         */
                        if (unlikely(PageCompound(page))) {
-                               low_pfn += (1UL << compound_order(page)) - 1;
+                               low_pfn += compound_nr(page) - 1;
                                goto isolate_fail;
                        }
                }
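
The hunk above swaps the open-coded "1UL << compound_order(page)" for compound_nr(page), which returns the number of base pages in a compound page. A minimal userspace sketch of the equivalence follows; the demo_* names are hypothetical stand-ins, not the kernel's definitions.

/*
 * Standalone userspace sketch (not kernel code).  demo_page,
 * demo_compound_order() and demo_compound_nr() are hypothetical
 * stand-ins for struct page, compound_order() and compound_nr();
 * they only model the arithmetic.
 */
#include <stdio.h>

struct demo_page {
    unsigned int order;         /* compound order of the head page */
};

static unsigned int demo_compound_order(const struct demo_page *page)
{
    return page->order;
}

/* number of base pages covered by the compound page: 2^order */
static unsigned long demo_compound_nr(const struct demo_page *page)
{
    return 1UL << demo_compound_order(page);
}

int main(void)
{
    struct demo_page thp = { .order = 9 };  /* e.g. a 2MB THP on x86-64 */
    unsigned long low_pfn = 4096;

    /* old form: open-coded shift */
    unsigned long skip_old = low_pfn + (1UL << demo_compound_order(&thp)) - 1;
    /* new form: helper hides the shift, result is identical */
    unsigned long skip_new = low_pfn + demo_compound_nr(&thp) - 1;

    printf("old=%lu new=%lu\n", skip_old, skip_new);
    return 0;
}

Either expression advances low_pfn past the tail pages of the compound page; the helper just hides the shift.
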
@@ -1737,8 +1737,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
  * starting at the block pointed to by the migrate scanner pfn within
  * compact_control.
  */
-static isolate_migrate_t isolate_migratepages(struct zone *zone,
-                                       struct compact_control *cc)
+static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
 {
        unsigned long block_start_pfn;
        unsigned long block_end_pfn;
@@ -1756,8 +1755,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
         */
        low_pfn = fast_find_migrateblock(cc);
        block_start_pfn = pageblock_start_pfn(low_pfn);
-       if (block_start_pfn < zone->zone_start_pfn)
-               block_start_pfn = zone->zone_start_pfn;
+       if (block_start_pfn < cc->zone->zone_start_pfn)
+               block_start_pfn = cc->zone->zone_start_pfn;
 
        /*
         * fast_find_migrateblock marks a pageblock skipped so to avoid
@@ -1787,8 +1786,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
                        cond_resched();
 
-               page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
-                                                                       zone);
+               page = pageblock_pfn_to_page(block_start_pfn,
+                                               block_end_pfn, cc->zone);
                if (!page)
                        continue;
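
This and the surrounding hunks drop the zone argument from isolate_migratepages() because struct compact_control already carries the zone in cc->zone. A standalone sketch of the pattern, with hypothetical demo_* types standing in for the kernel structures:

/*
 * Standalone sketch (not kernel code) of the refactoring pattern: the
 * zone pointer is already carried by the control structure, so the
 * separate parameter can be dropped.  demo_zone, demo_cc and the two
 * functions are hypothetical stand-ins.
 */
#include <stdio.h>

struct demo_zone {
    unsigned long zone_start_pfn;
};

struct demo_cc {
    struct demo_zone *zone;     /* zone being compacted */
    unsigned long migrate_pfn;
};

/* before: zone passed separately, duplicating cc->zone */
static void demo_isolate_old(struct demo_zone *zone, struct demo_cc *cc)
{
    if (cc->migrate_pfn < zone->zone_start_pfn)
        cc->migrate_pfn = zone->zone_start_pfn;
}

/* after: a single source of truth, cc->zone */
static void demo_isolate_new(struct demo_cc *cc)
{
    if (cc->migrate_pfn < cc->zone->zone_start_pfn)
        cc->migrate_pfn = cc->zone->zone_start_pfn;
}

int main(void)
{
    struct demo_zone zone = { .zone_start_pfn = 1024 };
    struct demo_cc cc = { .zone = &zone, .migrate_pfn = 512 };

    demo_isolate_old(cc.zone, &cc); /* caller must keep both in sync */
    demo_isolate_new(&cc);          /* a mismatched zone is no longer possible */
    printf("migrate_pfn=%lu\n", cc.migrate_pfn);
    return 0;
}

With the parameter gone, callers cannot pass a zone that disagrees with cc->zone.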
 
@@ -2078,6 +2077,17 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
        const bool sync = cc->mode != MIGRATE_ASYNC;
        bool update_cached;
 
+       /*
+        * These counters track activities during zone compaction.  Initialize
+        * them before compacting a new zone.
+        */
+       cc->total_migrate_scanned = 0;
+       cc->total_free_scanned = 0;
+       cc->nr_migratepages = 0;
+       cc->nr_freepages = 0;
+       INIT_LIST_HEAD(&cc->freepages);
+       INIT_LIST_HEAD(&cc->migratepages);
+
        cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
        ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
                                                        cc->classzone_idx);
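
The hunk above moves the per-zone counter and list initialization into compact_zone() itself, so the reset happens once per compacted zone no matter which caller invokes it. A minimal sketch of that idea, using hypothetical demo_* names rather than the kernel's:

/*
 * Standalone sketch (not kernel code): resetting per-zone state inside
 * the worker, as compact_zone() now does, means no caller can forget
 * the reset when it loops over several zones.  demo_cc and
 * demo_compact_zone() are hypothetical stand-ins.
 */
#include <stdio.h>

struct demo_cc {
    unsigned long total_scanned;    /* accumulated while compacting one zone */
};

static void demo_compact_zone(struct demo_cc *cc, unsigned long scanned)
{
    cc->total_scanned = 0;          /* per-zone reset lives in the worker */
    cc->total_scanned += scanned;
}

int main(void)
{
    struct demo_cc cc = { 0 };

    /* each "zone" starts from a clean counter, regardless of the caller */
    for (unsigned long zone = 0; zone < 3; zone++) {
        demo_compact_zone(&cc, 100 * (zone + 1));
        printf("zone %lu scanned %lu\n", zone, cc.total_scanned);
    }
    return 0;
}

The later hunks in this patch simply delete the now-duplicated initialization from each caller.
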
@@ -2158,7 +2168,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
                        cc->rescan = true;
                }
 
-               switch (isolate_migratepages(cc->zone, cc)) {
+               switch (isolate_migratepages(cc)) {
                case ISOLATE_ABORT:
                        ret = COMPACT_CONTENDED;
                        putback_movable_pages(&cc->migratepages);
@@ -2281,10 +2291,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
 {
        enum compact_result ret;
        struct compact_control cc = {
-               .nr_freepages = 0,
-               .nr_migratepages = 0,
-               .total_migrate_scanned = 0,
-               .total_free_scanned = 0,
                .order = order,
                .search_order = order,
                .gfp_mask = gfp_mask,
@@ -2305,8 +2311,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
 
        if (capture)
                current->capture_control = &capc;
-       INIT_LIST_HEAD(&cc.freepages);
-       INIT_LIST_HEAD(&cc.migratepages);
 
        ret = compact_zone(&cc, &capc);
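
The counter lines removed from the designated initializer above were already redundant: in C, members not named in a designated initializer are zero-initialized. (The INIT_LIST_HEAD() calls, by contrast, are now performed inside compact_zone().) A small standalone example of that zero-fill rule; demo_cc is a hypothetical struct, not compact_control:

/*
 * Standalone sketch (not kernel code): in a C designated initializer,
 * members that are not named are zero-initialized, so explicit "= 0"
 * lines never change the initial values.
 */
#include <assert.h>
#include <stdio.h>

struct demo_cc {
    int order;
    unsigned long nr_freepages;
    unsigned long total_migrate_scanned;
};

int main(void)
{
    /* only .order is named; the remaining members start at zero */
    struct demo_cc cc = {
        .order = 3,
    };

    assert(cc.nr_freepages == 0);
    assert(cc.total_migrate_scanned == 0);
    printf("order=%d nr_freepages=%lu scanned=%lu\n",
           cc.order, cc.nr_freepages, cc.total_migrate_scanned);
    return 0;
}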
 
@@ -2408,8 +2412,6 @@ static void compact_node(int nid)
        struct zone *zone;
        struct compact_control cc = {
                .order = -1,
-               .total_migrate_scanned = 0,
-               .total_free_scanned = 0,
                .mode = MIGRATE_SYNC,
                .ignore_skip_hint = true,
                .whole_zone = true,
@@ -2423,11 +2425,7 @@ static void compact_node(int nid)
                if (!populated_zone(zone))
                        continue;
 
-               cc.nr_freepages = 0;
-               cc.nr_migratepages = 0;
                cc.zone = zone;
-               INIT_LIST_HEAD(&cc.freepages);
-               INIT_LIST_HEAD(&cc.migratepages);
 
                compact_zone(&cc, NULL);
 
@@ -2529,8 +2527,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
        struct compact_control cc = {
                .order = pgdat->kcompactd_max_order,
                .search_order = pgdat->kcompactd_max_order,
-               .total_migrate_scanned = 0,
-               .total_free_scanned = 0,
                .classzone_idx = pgdat->kcompactd_classzone_idx,
                .mode = MIGRATE_SYNC_LIGHT,
                .ignore_skip_hint = false,
@@ -2554,16 +2550,10 @@ static void kcompactd_do_work(pg_data_t *pgdat)
                                                        COMPACT_CONTINUE)
                        continue;
 
-               cc.nr_freepages = 0;
-               cc.nr_migratepages = 0;
-               cc.total_migrate_scanned = 0;
-               cc.total_free_scanned = 0;
-               cc.zone = zone;
-               INIT_LIST_HEAD(&cc.freepages);
-               INIT_LIST_HEAD(&cc.migratepages);
-
                if (kthread_should_stop())
                        return;
+
+               cc.zone = zone;
                status = compact_zone(&cc, NULL);
 
                if (status == COMPACT_SUCCESS) {