Merge tag 'arm-soc-5.13' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
diff --git a/mm/compaction.c b/mm/compaction.c
index 3aef10c..e04f447 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -137,7 +137,6 @@ EXPORT_SYMBOL(__SetPageMovable);
 
 void __ClearPageMovable(struct page *page)
 {
-       VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageMovable(page), page);
        /*
         * Clear registered address_space val while keeping PAGE_MAPPING_MOVABLE
@@ -995,7 +994,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                if (!TestClearPageLRU(page))
                        goto isolate_fail_put;
 
-               rcu_read_lock();
                lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
                /* If we already hold the lock, we can skip some rechecking */
@@ -1005,7 +1003,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
                        compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
                        locked = lruvec;
-                       rcu_read_unlock();
 
                        lruvec_memcg_debug(lruvec, page);
 
@@ -1026,8 +1023,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                                SetPageLRU(page);
                                goto isolate_fail_put;
                        }
-               } else
-                       rcu_read_unlock();
+               }
 
                /* The whole page is taken off the LRU; skip the tail pages. */
                if (PageCompound(page))
@@ -1288,7 +1284,7 @@ static void
 fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
 {
        unsigned long start_pfn, end_pfn;
-       struct page *page = pfn_to_page(pfn);
+       struct page *page;
 
        /* Do not search around if there are enough pages already */
        if (cc->nr_freepages >= cc->nr_migratepages)
@@ -1299,8 +1295,12 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long
                return;
 
        /* Pageblock boundaries */
-       start_pfn = pageblock_start_pfn(pfn);
-       end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1;
+       start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
+       end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
+
+       page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
+       if (!page)
+               return;
 
        /* Scan before */
        if (start_pfn != pfn) {
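
A minimal userspace sketch of the zone-boundary clamping introduced in the two
fast_isolate_around() hunks above: when a zone does not start or end on a
pageblock boundary, the raw pageblock bounds of a pfn can step outside the
zone, so they are clamped before the block is scanned. The pageblock order,
the pfn and the zone bounds below are illustrative stand-ins, not the kernel's
definitions.

#include <stdio.h>

#define PAGEBLOCK_ORDER		9
#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)

static unsigned long pb_start(unsigned long pfn) { return pfn & ~(PAGEBLOCK_NR_PAGES - 1); }
static unsigned long pb_end(unsigned long pfn)   { return pb_start(pfn) + PAGEBLOCK_NR_PAGES; }

int main(void)
{
	/* A zone that is not pageblock aligned at either end (made-up pfns). */
	unsigned long zone_start_pfn = 0x100, zone_end_pfn = 0x1f0;
	unsigned long pfn = 0x120;

	/* Unclamped pageblock bounds reach outside the zone... */
	printf("raw bounds:     %#lx-%#lx\n", pb_start(pfn), pb_end(pfn));

	/* ...so clamp them to the zone, as the patch now does. */
	unsigned long start = pb_start(pfn) > zone_start_pfn ? pb_start(pfn) : zone_start_pfn;
	unsigned long end   = pb_end(pfn)   < zone_end_pfn   ? pb_end(pfn)   : zone_end_pfn;
	printf("clamped bounds: %#lx-%#lx\n", start, end);

	return 0;
}
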
@@ -1402,7 +1402,8 @@ fast_isolate_freepages(struct compact_control *cc)
                        pfn = page_to_pfn(freepage);
 
                        if (pfn >= highest)
-                               highest = pageblock_start_pfn(pfn);
+                               highest = max(pageblock_start_pfn(pfn),
+                                             cc->zone->zone_start_pfn);
 
                        if (pfn >= low_pfn) {
                                cc->fast_search_fail = 0;
@@ -1472,7 +1473,8 @@ fast_isolate_freepages(struct compact_control *cc)
                        } else {
                                if (cc->direct_compaction && pfn_valid(min_pfn)) {
                                        page = pageblock_pfn_to_page(min_pfn,
-                                               pageblock_end_pfn(min_pfn),
+                                               min(pageblock_end_pfn(min_pfn),
+                                                   zone_end_pfn(cc->zone)),
                                                cc->zone);
                                        cc->free_pfn = min_pfn;
                                }
@@ -1702,6 +1704,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
        unsigned long pfn = cc->migrate_pfn;
        unsigned long high_pfn;
        int order;
+       bool found_block = false;
 
        /* Skip hints are relied on to avoid repeats on the fast search */
        if (cc->ignore_skip_hint)
@@ -1744,7 +1747,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
        high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
 
        for (order = cc->order - 1;
-            order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit;
+            order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
             order--) {
                struct free_area *area = &cc->zone->free_area[order];
                struct list_head *freelist;
@@ -1759,7 +1762,11 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
                list_for_each_entry(freepage, freelist, lru) {
                        unsigned long free_pfn;
 
-                       nr_scanned++;
+                       if (nr_scanned++ >= limit) {
+                               move_freelist_tail(freelist, freepage);
+                               break;
+                       }
+
                        free_pfn = page_to_pfn(freepage);
                        if (free_pfn < high_pfn) {
                                /*
@@ -1768,12 +1775,8 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
                                 * the list assumes an entry is deleted, not
                                 * reordered.
                                 */
-                               if (get_pageblock_skip(freepage)) {
-                                       if (list_is_last(freelist, &freepage->lru))
-                                               break;
-
+                               if (get_pageblock_skip(freepage))
                                        continue;
-                               }
 
                        /* Reorder so a future search skips recent pages */
                                move_freelist_tail(freelist, freepage);
@@ -1781,15 +1784,10 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
                                update_fast_start_pfn(cc, free_pfn);
                                pfn = pageblock_start_pfn(free_pfn);
                                cc->fast_search_fail = 0;
+                               found_block = true;
                                set_pageblock_skip(freepage);
                                break;
                        }
-
-                       if (nr_scanned >= limit) {
-                               cc->fast_search_fail++;
-                               move_freelist_tail(freelist, freepage);
-                               break;
-                       }
                }
                spin_unlock_irqrestore(&cc->zone->lock, flags);
        }
@@ -1800,9 +1798,10 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
         * If fast scanning failed then use a cached entry for a page block
         * that had free pages as the basis for starting a linear scan.
         */
-       if (pfn == cc->migrate_pfn)
+       if (!found_block) {
+               cc->fast_search_fail++;
                pfn = reinit_migrate_pfn(cc);
-
+       }
        return pfn;
 }
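
A hedged sketch of the accounting change made to fast_find_migrateblock() in
the hunks above: success is tracked with an explicit found_block-style flag,
the scan limit is checked before an entry is examined, and the failure counter
is bumped exactly once after the loop rather than inside it or by comparing
pfn against its starting value. The scan() helper, its suitability test and
the data are illustrative only, not kernel code.

#include <stdbool.h>
#include <stdio.h>

static unsigned int fast_search_fail;	/* stand-in for cc->fast_search_fail */

static long scan(const long *blocks, int nr, int limit)
{
	bool found_block = false;
	long pfn = -1;
	int nr_scanned = 0;

	for (int i = 0; i < nr && !found_block; i++) {
		if (nr_scanned++ >= limit)
			break;
		if (blocks[i] % 2 == 0) {	/* made-up "suitable block" test */
			pfn = blocks[i];
			found_block = true;
		}
	}

	if (!found_block) {
		fast_search_fail++;	/* failure accounted for in one place */
		pfn = 0;		/* stand-in for reinit_migrate_pfn() */
	}
	return pfn;
}

int main(void)
{
	long blocks[] = { 3, 5, 8, 9 };

	printf("found %ld, failures %u\n", scan(blocks, 4, 8), fast_search_fail);
	printf("found %ld, failures %u\n", scan(blocks, 4, 2), fast_search_fail);
	return 0;
}
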
 
@@ -1926,20 +1925,28 @@ static bool kswapd_is_running(pg_data_t *pgdat)
 
 /*
  * A zone's fragmentation score is the external fragmentation wrt the
- * COMPACTION_HPAGE_ORDER scaled by the zone's size. It returns a value
- * in the range [0, 100].
+ * COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100].
+ */
+static unsigned int fragmentation_score_zone(struct zone *zone)
+{
+       return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
+}
+
+/*
+ * A weighted zone's fragmentation score is the external fragmentation
+ * wrt the COMPACTION_HPAGE_ORDER scaled by the zone's size. It
+ * returns a value in the range [0, 100].
  *
  * The scaling factor ensures that proactive compaction focuses on larger
  * zones like ZONE_NORMAL, rather than smaller, specialized zones like
  * ZONE_DMA32. For smaller zones, the score value remains close to zero,
  * and thus never exceeds the high threshold for proactive compaction.
  */
-static unsigned int fragmentation_score_zone(struct zone *zone)
+static unsigned int fragmentation_score_zone_weighted(struct zone *zone)
 {
        unsigned long score;
 
-       score = zone->present_pages *
-                       extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
+       score = zone->present_pages * fragmentation_score_zone(zone);
        return div64_ul(score, zone->zone_pgdat->node_present_pages + 1);
 }
 
@@ -1959,7 +1966,7 @@ static unsigned int fragmentation_score_node(pg_data_t *pgdat)
                struct zone *zone;
 
                zone = &pgdat->node_zones[zoneid];
-               score += fragmentation_score_zone(zone);
+               score += fragmentation_score_zone_weighted(zone);
        }
 
        return score;
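
A small userspace sketch of the split introduced above between the raw
per-zone score and the node-weighted one, assuming made-up zone sizes and
extfrag values and plain 64-bit division in place of div64_ul(). It shows why
a small but heavily fragmented zone such as ZONE_DMA32 contributes little to
the node score, while a large ZONE_NORMAL dominates it.

#include <stdio.h>

struct zone_stub {
	unsigned long present_pages;
	unsigned int extfrag;			/* stand-in for extfrag_for_order() */
};

static unsigned int score_zone(const struct zone_stub *z)
{
	return z->extfrag;			/* raw score, already in [0, 100] */
}

static unsigned int score_zone_weighted(const struct zone_stub *z,
					unsigned long node_present_pages)
{
	unsigned long long score = (unsigned long long)z->present_pages * score_zone(z);

	return score / (node_present_pages + 1);	/* scaled by the zone's share of the node */
}

int main(void)
{
	struct zone_stub dma32  = { .present_pages = 1UL << 18, .extfrag = 90 };
	struct zone_stub normal = { .present_pages = 1UL << 22, .extfrag = 40 };
	unsigned long node_pages = dma32.present_pages + normal.present_pages;

	/* The small but fragmented zone barely moves the node-wide score. */
	printf("DMA32:  raw %u, weighted %u\n", score_zone(&dma32),
	       score_zone_weighted(&dma32, node_pages));
	printf("NORMAL: raw %u, weighted %u\n", score_zone(&normal),
	       score_zone_weighted(&normal, node_pages));
	return 0;
}
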