X-Git-Url: http://git.monstr.eu/?a=blobdiff_plain;f=mm%2Fcompaction.c;h=e78cb968842163ff65154346e6721aac91070319;hb=74f330bceaa7b88d06062e1cac3d519a3dfc041e;hp=2f42d952853970b5dd1f95a149d8750e7d570357;hpb=c062c4d1de57789bf15f7641a24c429eeb8a1c6a;p=linux-2.6-microblaze.git

diff --git a/mm/compaction.c b/mm/compaction.c
index 2f42d9528539..e78cb9688421 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -422,6 +422,17 @@ static void isolate_freepages(struct zone *zone,
 					pfn -= pageblock_nr_pages) {
 		unsigned long isolated;
 
+		/*
+		 * Skip ahead if another thread is compacting in the area
+		 * simultaneously. If we wrapped around, we can only skip
+		 * ahead if zone->compact_cached_free_pfn also wrapped to
+		 * above our starting point.
+		 */
+		if (cc->order > 0 && (!cc->wrapped ||
+				      zone->compact_cached_free_pfn >
+				      cc->start_free_pfn))
+			pfn = min(pfn, zone->compact_cached_free_pfn);
+
 		if (!pfn_valid(pfn))
 			continue;
 
@@ -461,8 +472,11 @@ static void isolate_freepages(struct zone *zone,
 		 * looking for free pages, the search will restart here as
 		 * page migration may have returned some pages to the allocator
 		 */
-		if (isolated)
+		if (isolated) {
 			high_pfn = max(high_pfn, pfn);
+			if (cc->order > 0)
+				zone->compact_cached_free_pfn = high_pfn;
+		}
 	}
 
 	/* split_free_page does not map the pages */
@@ -556,6 +570,20 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	return ISOLATE_SUCCESS;
 }
 
+/*
+ * Returns the start pfn of the last page block in a zone. This is the starting
+ * point for full compaction of a zone. Compaction searches for free pages from
+ * the end of each zone, while isolate_freepages_block scans forward inside each
+ * page block.
+ */
+static unsigned long start_free_pfn(struct zone *zone)
+{
+	unsigned long free_pfn;
+	free_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	free_pfn &= ~(pageblock_nr_pages-1);
+	return free_pfn;
+}
+
 static int compact_finished(struct zone *zone,
 			    struct compact_control *cc)
 {
@@ -565,8 +593,26 @@ static int compact_finished(struct zone *zone,
 	if (fatal_signal_pending(current))
 		return COMPACT_PARTIAL;
 
-	/* Compaction run completes if the migrate and free scanner meet */
-	if (cc->free_pfn <= cc->migrate_pfn)
+	/*
+	 * A full (order == -1) compaction run starts at the beginning and
+	 * end of a zone; it completes when the migrate and free scanner meet.
+	 * A partial (order > 0) compaction can start with the free scanner
+	 * at a random point in the zone, and may have to restart.
+	 */
+	if (cc->free_pfn <= cc->migrate_pfn) {
+		if (cc->order > 0 && !cc->wrapped) {
+			/* We started partway through; restart at the end. */
+			unsigned long free_pfn = start_free_pfn(zone);
+			zone->compact_cached_free_pfn = free_pfn;
+			cc->free_pfn = free_pfn;
+			cc->wrapped = 1;
+			return COMPACT_CONTINUE;
+		}
+		return COMPACT_COMPLETE;
+	}
+
+	/* We wrapped around and ended up where we started. */
+	if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
 		return COMPACT_COMPLETE;
 
 	/*
@@ -664,8 +710,15 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 	/* Setup to move all movable pages to the end of the zone */
 	cc->migrate_pfn = zone->zone_start_pfn;
-	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
-	cc->free_pfn &= ~(pageblock_nr_pages-1);
+
+	if (cc->order > 0) {
+		/* Incremental compaction. Start where the last one stopped. */
+		cc->free_pfn = zone->compact_cached_free_pfn;
+		cc->start_free_pfn = cc->free_pfn;
+	} else {
+		/* Order == -1 starts at the end of the zone. */
+		cc->free_pfn = start_free_pfn(zone);
+	}
 
 	migrate_prep_local();
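
Note on start_free_pfn(): the helper relies on pageblock_nr_pages being a
power of two, so masking with ~(pageblock_nr_pages-1) rounds the zone end
down to a pageblock boundary. A minimal userspace sketch of the same
arithmetic, assuming a hypothetical 1024-page pageblock (the standalone
function and constants below are illustrative stand-ins, not kernel code):

	#include <assert.h>

	/* Illustrative stand-in for pageblock_nr_pages; must be a
	 * power of two for the mask below to work. */
	#define PAGEBLOCK_NR_PAGES 1024UL

	/* Mirrors start_free_pfn(): take the pfn one past the end of
	 * the zone and round it down to the start of the last full
	 * pageblock. */
	static unsigned long start_free_pfn(unsigned long zone_start_pfn,
					    unsigned long spanned_pages)
	{
		unsigned long free_pfn = zone_start_pfn + spanned_pages;
		free_pfn &= ~(PAGEBLOCK_NR_PAGES - 1);
		return free_pfn;
	}

	int main(void)
	{
		/* A zone starting at pfn 4096 and spanning 10000 pages
		 * ends at pfn 14096; the last pageblock boundary below
		 * that is 13 * 1024 = 13312. */
		assert(start_free_pfn(4096, 10000) == 13312);
		return 0;
	}

Rounding down rather than up keeps the free scanner's initial position
inside the zone, on a pageblock boundary that isolate_freepages_block can
scan forward from.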
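
Note on the wrap-around bookkeeping: a partial (order > 0) run starts the
free scanner at zone->compact_cached_free_pfn, so when it meets the migrate
scanner mid-zone it wraps once to the end of the zone and then completes
when it drops back to its original starting point. A self-contained model
of just that state machine; zone_model, cc_model and finished() below are
hypothetical mirrors of struct zone, struct compact_control and the checks
this patch adds, not the real kernel structures:

	#include <assert.h>
	#include <stdio.h>

	enum { COMPACT_CONTINUE, COMPACT_COMPLETE };

	/* Illustrative stand-ins; only the fields the patch touches
	 * are modelled. */
	struct zone_model {
		unsigned long compact_cached_free_pfn;
		unsigned long last_pageblock_pfn; /* start_free_pfn(zone) */
	};

	struct cc_model {
		int order;			/* > 0: partial compaction */
		int wrapped;
		unsigned long free_pfn;
		unsigned long migrate_pfn;
		unsigned long start_free_pfn;	/* where this run began */
	};

	/* Mirrors the termination logic compact_finished() gains in
	 * this patch: a partial run that began mid-zone restarts once
	 * from the end, then completes back at its starting point. */
	static int finished(struct zone_model *zone, struct cc_model *cc)
	{
		if (cc->free_pfn <= cc->migrate_pfn) {
			if (cc->order > 0 && !cc->wrapped) {
				zone->compact_cached_free_pfn =
					zone->last_pageblock_pfn;
				cc->free_pfn = zone->last_pageblock_pfn;
				cc->wrapped = 1;
				return COMPACT_CONTINUE;
			}
			return COMPACT_COMPLETE;
		}

		if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
			return COMPACT_COMPLETE;

		return COMPACT_CONTINUE;
	}

	int main(void)
	{
		struct zone_model zone = { .last_pageblock_pfn = 13312 };
		struct cc_model cc = {
			.order = 3,
			.start_free_pfn = 8192,	/* free scanner began here */
			.free_pfn = 6000,	/* ...and met the migrate
						 * scanner at pfn 6000 */
			.migrate_pfn = 6000,
		};

		/* Scanners met mid-zone: wrap the free scanner to the end. */
		assert(finished(&zone, &cc) == COMPACT_CONTINUE && cc.wrapped);
		assert(cc.free_pfn == 13312);

		/* Scanned back down past the original start point: done. */
		cc.free_pfn = 8000;
		assert(finished(&zone, &cc) == COMPACT_COMPLETE);

		puts("ok");
		return 0;
	}

The second assert completes via the wrapped check rather than the
scanner-meeting check because start_free_pfn (8192) lies above the point
where the scanners originally met (6000), which is exactly the case the
new cc->wrapped/cc->start_free_pfn fields exist to detect.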