{
count_vm_events(item, delta);
}
+
+/*
+ * order == -1 is expected when compacting proactively via
+ * 1. /proc/sys/vm/compact_memory
+ * 2. /sys/devices/system/node/nodex/compact
+ * 3. /proc/sys/vm/compaction_proactiveness
+ */
+static inline bool is_via_compact_memory(int order)
+{
+ return order == -1;
+}
+
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
+static inline bool is_via_compact_memory(int order) { return false; }
#endif
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
-#define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order)
-#define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order)
/*
* Page order with-respect-to which proactive compaction
#define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT)
#endif
-static unsigned long release_freepages(struct list_head *freelist)
+static void split_map_pages(struct list_head *freepages)
{
+ unsigned int i, order;
struct page *page, *next;
- unsigned long high_pfn = 0;
+ LIST_HEAD(tmp_list);
- list_for_each_entry_safe(page, next, freelist, lru) {
- unsigned long pfn = page_to_pfn(page);
- list_del(&page->lru);
- __free_page(page);
- if (pfn > high_pfn)
- high_pfn = pfn;
- }
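+ /*
+ * Prepare each isolated free page for use, split it down to order-0
+ * pages, and collect them all on the order-0 list.
+ */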
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
+ list_for_each_entry_safe(page, next, &freepages[order], lru) {
+ unsigned int nr_pages;
- return high_pfn;
+ list_del(&page->lru);
+
+ nr_pages = 1 << order;
+
+ post_alloc_hook(page, order, __GFP_MOVABLE);
+ if (order)
+ split_page(page, order);
+
+ for (i = 0; i < nr_pages; i++) {
+ list_add(&page->lru, &tmp_list);
+ page++;
+ }
+ }
+ list_splice_init(&tmp_list, &freepages[0]);
+ }
}
-static void split_map_pages(struct list_head *list)
+static unsigned long release_free_list(struct list_head *freepages)
{
- unsigned int i, order, nr_pages;
- struct page *page, *next;
- LIST_HEAD(tmp_list);
-
- list_for_each_entry_safe(page, next, list, lru) {
- list_del(&page->lru);
+ int order;
+ unsigned long high_pfn = 0;
- order = page_private(page);
- nr_pages = 1 << order;
+ for (order = 0; order < NR_PAGE_ORDERS; order++) {
+ struct page *page, *next;
- post_alloc_hook(page, order, __GFP_MOVABLE);
- if (order)
- split_page(page, order);
+ list_for_each_entry_safe(page, next, &freepages[order], lru) {
+ unsigned long pfn = page_to_pfn(page);
- for (i = 0; i < nr_pages; i++) {
- list_add(&page->lru, &tmp_list);
- page++;
+ list_del(&page->lru);
+ /*
+ * Convert free pages into post-allocation pages, so
+ * that we can free them via __free_pages().
+ */
+ post_alloc_hook(page, order, __GFP_MOVABLE);
+ __free_pages(page, order);
+ if (pfn > high_pfn)
+ high_pfn = pfn;
}
}
-
- list_splice(&tmp_list, list);
+ return high_pfn;
}
#ifdef CONFIG_COMPACTION
return false;
}
-EXPORT_SYMBOL(PageMovable);
void __SetPageMovable(struct page *page, const struct movable_operations *mops)
{
pageblock_start_pfn(zone_end_pfn(zone) - 1);
}
+#ifdef CONFIG_SPARSEMEM
+/*
+ * If the PFN falls into an offline section, return the start PFN of the
+ * next online section. If the PFN falls into an online section or if
+ * there is no next online section, return 0.
+ */
+static unsigned long skip_offline_sections(unsigned long start_pfn)
+{
+ unsigned long start_nr = pfn_to_section_nr(start_pfn);
+
+ if (online_section_nr(start_nr))
+ return 0;
+
+ while (++start_nr <= __highest_present_section_nr) {
+ if (online_section_nr(start_nr))
+ return section_nr_to_pfn(start_nr);
+ }
+
+ return 0;
+}
+
+/*
+ * If the PFN falls into an offline section, return the end PFN of the
+ * next online section in reverse. If the PFN falls into an online section
+ * or if there is no next online section in reverse, return 0.
+ */
+static unsigned long skip_offline_sections_reverse(unsigned long start_pfn)
+{
+ unsigned long start_nr = pfn_to_section_nr(start_pfn);
+
+ if (!start_nr || online_section_nr(start_nr))
+ return 0;
+
+ while (start_nr-- > 0) {
+ if (online_section_nr(start_nr))
+ return section_nr_to_pfn(start_nr) + PAGES_PER_SECTION;
+ }
+
+ return 0;
+}
+#else
+static unsigned long skip_offline_sections(unsigned long start_pfn)
+{
+ return 0;
+}
+
+static unsigned long skip_offline_sections_reverse(unsigned long start_pfn)
+{
+ return 0;
+}
+#endif
+
/*
* Compound pages of >= pageblock_order should consistently be skipped until
* released. It is always pointless to compact pages of such order (if they are
bool source_set = false;
bool free_set = false;
+ /* Only flush if a full compaction finished recently */
if (!zone->compact_blockskip_flush)
return;
if (!populated_zone(zone))
continue;
- /* Only flush if a full compaction finished recently */
- if (zone->compact_blockskip_flush)
- __reset_isolation_suitable(zone);
+ __reset_isolation_suitable(zone);
}
}
* Sets the pageblock skip bit if it was clear. Note that this is a hint as
* locks are not required for read/writers. Returns true if it was already set.
*/
-static bool test_and_set_skip(struct compact_control *cc, struct page *page,
- unsigned long pfn)
+static bool test_and_set_skip(struct compact_control *cc, struct page *page)
{
bool skip;
- /* Do no update if skip hint is being ignored */
+ /* Do not update if skip hint is being ignored */
if (cc->ignore_skip_hint)
return false;
- if (!IS_ALIGNED(pfn, pageblock_nr_pages))
- return false;
-
skip = get_pageblock_skip(page);
if (!skip && !cc->no_set_skip_hint)
set_pageblock_skip(page);
{
struct zone *zone = cc->zone;
- pfn = pageblock_end_pfn(pfn);
-
/* Set for isolation rather than compaction */
if (cc->no_set_skip_hint)
return;
+ pfn = pageblock_end_pfn(pfn);
+
+ /* Update where async and sync compaction should restart */
if (pfn > zone->compact_cached_migrate_pfn[0])
zone->compact_cached_migrate_pfn[0] = pfn;
if (cc->mode != MIGRATE_ASYNC &&
if (cc->no_set_skip_hint)
return;
- if (!page)
- return;
-
set_pageblock_skip(page);
- /* Update where async and sync compaction should restart */
if (pfn < zone->compact_cached_free_pfn)
zone->compact_cached_free_pfn = pfn;
}
{
}
-static bool test_and_set_skip(struct compact_control *cc, struct page *page,
- unsigned long pfn)
+static bool test_and_set_skip(struct compact_control *cc, struct page *page)
{
return false;
}
bool strict)
{
int nr_scanned = 0, total_isolated = 0;
- struct page *cursor;
+ struct page *page;
unsigned long flags = 0;
bool locked = false;
unsigned long blockpfn = *start_pfn;
if (strict)
stride = 1;
- cursor = pfn_to_page(blockpfn);
+ page = pfn_to_page(blockpfn);
/* Isolate free pages. */
- for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
+ for (; blockpfn < end_pfn; blockpfn += stride, page += stride) {
int isolated;
- struct page *page = cursor;
/*
* Periodically drop the lock (if held) regardless of its
if (PageCompound(page)) {
const unsigned int order = compound_order(page);
- if (likely(order < MAX_ORDER)) {
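+ /*
+ * The compound_order() read is racy and may be bogus;
+ * only skip ahead when doing so stays within the
+ * scanned range.
+ */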
+ if (blockpfn + (1UL << order) <= end_pfn) {
blockpfn += (1UL << order) - 1;
- cursor += (1UL << order) - 1;
+ page += (1UL << order) - 1;
+ nr_scanned += (1UL << order) - 1;
}
+
goto isolate_fail;
}
nr_scanned += isolated - 1;
total_isolated += isolated;
cc->nr_freepages += isolated;
- list_add_tail(&page->lru, freelist);
+ list_add_tail(&page->lru, &freelist[order]);
if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
blockpfn += isolated;
}
/* Advance to the end of split page */
blockpfn += isolated - 1;
- cursor += isolated - 1;
+ page += isolated - 1;
continue;
isolate_fail:
if (strict)
break;
- else
- continue;
}
spin_unlock_irqrestore(&cc->zone->lock, flags);
/*
- * There is a tiny chance that we have read bogus compound_order(),
- * so be careful to not go outside of the pageblock.
+ * Be careful not to go outside of the pageblock.
*/
if (unlikely(blockpfn > end_pfn))
blockpfn = end_pfn;
unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
- LIST_HEAD(freelist);
+ int order;
+ struct list_head tmp_freepages[NR_PAGE_ORDERS];
+
+ for (order = 0; order < NR_PAGE_ORDERS; order++)
+ INIT_LIST_HEAD(&tmp_freepages[order]);
pfn = start_pfn;
block_start_pfn = pageblock_start_pfn(pfn);
/* Protect pfn from changing by isolate_freepages_block */
unsigned long isolate_start_pfn = pfn;
- block_end_pfn = min(block_end_pfn, end_pfn);
-
/*
* pfn could pass the block_end_pfn if isolated freepage
* is more than pageblock order. In this case, we adjust
if (pfn >= block_end_pfn) {
block_start_pfn = pageblock_start_pfn(pfn);
block_end_pfn = pageblock_end_pfn(pfn);
- block_end_pfn = min(block_end_pfn, end_pfn);
}
+ block_end_pfn = min(block_end_pfn, end_pfn);
+
if (!pageblock_pfn_to_page(block_start_pfn,
block_end_pfn, cc->zone))
break;
isolated = isolate_freepages_block(cc, &isolate_start_pfn,
- block_end_pfn, &freelist, 0, true);
+ block_end_pfn, tmp_freepages, 0, true);
/*
* In strict mode, isolate_freepages_block() returns 0 if
*/
}
- /* __isolate_free_page() does not map the pages */
- split_map_pages(&freelist);
-
if (pfn < end_pfn) {
/* Loop terminated early, cleanup. */
- release_freepages(&freelist);
+ release_free_list(tmp_freepages);
return 0;
}
+ /* __isolate_free_page() does not map the pages */
+ split_map_pages(tmp_freepages);
+
/* We don't use freelists for anything. */
return pfn;
}
/* Similar to reclaim, but different enough that they don't share logic */
-static bool too_many_isolated(pg_data_t *pgdat)
+static bool too_many_isolated(struct compact_control *cc)
{
+ pg_data_t *pgdat = cc->zone->zone_pgdat;
bool too_many;
unsigned long active, inactive, isolated;
isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
node_page_state(pgdat, NR_ISOLATED_ANON);
+ /*
+ * Allow GFP_NOFS to isolate past the limit set for regular
+ * compaction runs. This prevents an ABBA deadlock when other
+ * compactors have already isolated to the limit, but are
+ * blocked on filesystem locks held by the GFP_NOFS thread.
+ */
+ if (cc->gfp_mask & __GFP_FS) {
+ inactive >>= 3;
+ active >>= 3;
+ }
+
too_many = isolated > (inactive + active) / 2;
if (!too_many)
wake_throttle_isolated(pgdat);
return too_many;
}
+/**
+ * skip_isolation_on_order() - determine when to skip folio isolation based on
+ * folio order and compaction target order
+ * @order: to-be-isolated folio order
+ * @target_order: compaction target order
+ *
+ * This avoids unnecessary folio isolations during compaction.
+ */
+static bool skip_isolation_on_order(int order, int target_order)
+{
+ /*
+ * Unless we are performing global compaction (i.e.,
+ * is_via_compact_memory), skip any folios that are larger than the
+ * target order: we wouldn't be here if we'd have a free folio with
+ * the desired target_order, so migrating this folio would likely fail
+ * later.
+ */
+ if (!is_via_compact_memory(target_order) && order >= target_order)
+ return true;
+ /*
+ * We limit memory compaction to pageblocks and won't try
+ * creating free blocks of memory that are larger than that.
+ */
+ return order >= pageblock_order;
+}
+
/**
* isolate_migratepages_block() - isolate all migrate-able pages within
* a single pageblock
struct lruvec *lruvec;
unsigned long flags = 0;
struct lruvec *locked = NULL;
+ struct folio *folio = NULL;
struct page *page = NULL, *valid_page = NULL;
struct address_space *mapping;
unsigned long start_pfn = low_pfn;
* list by either parallel reclaimers or compaction. If there are,
* delay for some time until fewer pages are isolated
*/
- while (unlikely(too_many_isolated(pgdat))) {
+ while (unlikely(too_many_isolated(cc))) {
/* stop isolation if there are still pages not migrated */
if (cc->nr_migratepages)
return -EAGAIN;
/* Time to isolate some pages for migration */
for (; low_pfn < end_pfn; low_pfn++) {
+ bool is_dirty, is_unevictable;
if (skip_on_failure && low_pfn >= next_skip_pfn) {
/*
/*
* Check if the pageblock has already been marked skipped.
- * Only the aligned PFN is checked as the caller isolates
+ * Only the first PFN is checked as the caller isolates
* COMPACT_CLUSTER_MAX at a time so the second call must
* not falsely conclude that the block should be skipped.
*/
- if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
+ if (!valid_page && (pageblock_aligned(low_pfn) ||
+ low_pfn == cc->zone->zone_start_pfn)) {
if (!isolation_suitable(cc, page)) {
low_pfn = end_pfn;
- page = NULL;
+ folio = NULL;
goto isolate_abort;
}
valid_page = page;
}
- if (PageHuge(page) && cc->alloc_contig) {
+ if (PageHuge(page)) {
+ /*
+ * skip hugetlbfs if we are not compacting for pages
+ * bigger than its order. THPs and other compound pages
+ * are handled below.
+ */
+ if (!cc->alloc_contig) {
+ const unsigned int order = compound_order(page);
+
+ if (order <= MAX_PAGE_ORDER) {
+ low_pfn += (1UL << order) - 1;
+ nr_scanned += (1UL << order) - 1;
+ }
+ goto isolate_fail;
+ }
+ /* for alloc_contig case */
+ if (locked) {
+ unlock_page_lruvec_irqrestore(locked, flags);
+ locked = NULL;
+ }
+
ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);
/*
if (ret == -EBUSY)
ret = 0;
low_pfn += compound_nr(page) - 1;
+ nr_scanned += compound_nr(page) - 1;
goto isolate_fail;
}
* Hugepage was successfully isolated and placed
* on the cc->migratepages list.
*/
- low_pfn += compound_nr(page) - 1;
+ folio = page_folio(page);
+ low_pfn += folio_nr_pages(folio) - 1;
goto isolate_success_no_list;
}
* a valid page order. Consider only values in the
* valid order range to prevent low_pfn overflow.
*/
- if (freepage_order > 0 && freepage_order < MAX_ORDER)
+ if (freepage_order > 0 && freepage_order <= MAX_PAGE_ORDER) {
low_pfn += (1UL << freepage_order) - 1;
+ nr_scanned += (1UL << freepage_order) - 1;
+ }
continue;
}
/*
- * Regardless of being on LRU, compound pages such as THP and
- * hugetlbfs are not to be compacted unless we are attempting
- * an allocation much larger than the huge page size (eg CMA).
- * We can potentially save a lot of iterations if we skip them
- * at once. The check is racy, but we can consider only valid
- * values and the only danger is skipping too much.
+ * Regardless of being on LRU, compound pages such as THP
+ * (hugetlbfs is handled above) are not to be compacted unless
+ * we are attempting an allocation larger than the compound
+ * page size. We can potentially save a lot of iterations if we
+ * skip them at once. The check is racy, but we can consider
+ * only valid values and the only danger is skipping too much.
*/
if (PageCompound(page) && !cc->alloc_contig) {
const unsigned int order = compound_order(page);
- if (likely(order < MAX_ORDER))
- low_pfn += (1UL << order) - 1;
- goto isolate_fail;
+ /* Skip based on page order and compaction target order. */
+ if (skip_isolation_on_order(order, cc->order)) {
+ if (order <= MAX_PAGE_ORDER) {
+ low_pfn += (1UL << order) - 1;
+ nr_scanned += (1UL << order) - 1;
+ }
+ goto isolate_fail;
+ }
}
/*
locked = NULL;
}
- if (!isolate_movable_page(page, mode))
+ if (isolate_movable_page(page, mode)) {
+ folio = page_folio(page);
goto isolate_success;
+ }
}
goto isolate_fail;
}
+ /*
+ * Be careful not to clear PageLRU until after we're
+ * sure the page is not being freed elsewhere -- the
+ * page release code relies on it.
+ */
+ folio = folio_get_nontail_page(page);
+ if (unlikely(!folio))
+ goto isolate_fail;
+
/*
* Migration will fail if an anonymous page is pinned in memory,
* so avoid taking lru_lock and isolating it unnecessarily in an
* admittedly racy check.
*/
- mapping = page_mapping(page);
- if (!mapping && page_count(page) > page_mapcount(page))
- goto isolate_fail;
+ mapping = folio_mapping(folio);
+ if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio))
+ goto isolate_fail_put;
/*
* Only allow to migrate anonymous pages in GFP_NOFS context
* because those do not depend on fs locks.
*/
if (!(cc->gfp_mask & __GFP_FS) && mapping)
- goto isolate_fail;
-
- /*
- * Be careful not to clear PageLRU until after we're
- * sure the page is not being freed elsewhere -- the
- * page release code relies on it.
- */
- if (unlikely(!get_page_unless_zero(page)))
- goto isolate_fail;
+ goto isolate_fail_put;
/* Only take pages on LRU: a check now makes later tests safe */
- if (!PageLRU(page))
+ if (!folio_test_lru(folio))
goto isolate_fail_put;
+ is_unevictable = folio_test_unevictable(folio);
+
/* Compaction might skip unevictable pages but CMA takes them */
- if (!(mode & ISOLATE_UNEVICTABLE) && PageUnevictable(page))
+ if (!(mode & ISOLATE_UNEVICTABLE) && is_unevictable)
goto isolate_fail_put;
/*
* it will be able to migrate without blocking - clean pages
* for the most part. PageWriteback would require blocking.
*/
- if ((mode & ISOLATE_ASYNC_MIGRATE) && PageWriteback(page))
+ if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio))
goto isolate_fail_put;
- if ((mode & ISOLATE_ASYNC_MIGRATE) && PageDirty(page)) {
- bool migrate_dirty;
+ is_dirty = folio_test_dirty(folio);
+
+ if (((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) ||
+ (mapping && is_unevictable)) {
+ bool migrate_dirty = true;
+ bool is_unmovable;
/*
- * Only pages without mappings or that have a
- * ->migrate_folio callback are possible to migrate
- * without blocking. However, we can be racing with
- * truncation so it's necessary to lock the page
- * to stabilise the mapping as truncation holds
- * the page lock until after the page is removed
- * from the page cache.
+ * Only folios without mappings or that have
+ * a ->migrate_folio callback are possible to migrate
+ * without blocking.
+ *
+ * Folios from unmovable mappings are not migratable.
+ *
+ * However, we can be racing with truncation, which can
+ * free the mapping that we need to check. Truncation
+ * holds the folio lock until after the folio is removed
+ * from the page cache, so holding it ourselves is sufficient.
+ *
+ * To avoid locking the folio just to check unmovable,
+ * assume every unmovable folio is also unevictable,
+ * which is a cheaper test. If our assumption goes
+ * wrong, it's not a correctness bug, just potentially
+ * wasted cycles.
*/
- if (!trylock_page(page))
+ if (!folio_trylock(folio))
goto isolate_fail_put;
- mapping = page_mapping(page);
- migrate_dirty = !mapping ||
- mapping->a_ops->migrate_folio;
- unlock_page(page);
- if (!migrate_dirty)
+ mapping = folio_mapping(folio);
+ if ((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) {
+ migrate_dirty = !mapping ||
+ mapping->a_ops->migrate_folio;
+ }
+ is_unmovable = mapping && mapping_unmovable(mapping);
+ folio_unlock(folio);
+ if (!migrate_dirty || is_unmovable)
goto isolate_fail_put;
}
- /* Try isolate the page */
- if (!TestClearPageLRU(page))
+ /* Try isolate the folio */
+ if (!folio_test_clear_lru(folio))
goto isolate_fail_put;
- lruvec = folio_lruvec(page_folio(page));
+ lruvec = folio_lruvec(folio);
/* If we already hold the lock, we can skip some rechecking */
if (lruvec != locked) {
compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
locked = lruvec;
- lruvec_memcg_debug(lruvec, page_folio(page));
+ lruvec_memcg_debug(lruvec, folio);
- /* Try get exclusive access under lock */
- if (!skip_updated) {
+ /*
+ * Try to get exclusive access under lock. If marked for
+ * skip, the scan is aborted unless the current context
+ * is a rescan to reach the end of the pageblock.
+ */
+ if (!skip_updated && valid_page) {
skip_updated = true;
- if (test_and_set_skip(cc, page, low_pfn))
+ if (test_and_set_skip(cc, valid_page) &&
+ !cc->finish_pageblock) {
+ low_pfn = end_pfn;
goto isolate_abort;
+ }
}
/*
- * Page become compound since the non-locked check,
- * and it's on LRU. It can only be a THP so the order
- * is safe to read and it's 0 for tail pages.
+ * Check LRU folio order under the lock
*/
- if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
- low_pfn += compound_nr(page) - 1;
- SetPageLRU(page);
+ if (unlikely(skip_isolation_on_order(folio_order(folio),
+ cc->order) &&
+ !cc->alloc_contig)) {
+ low_pfn += folio_nr_pages(folio) - 1;
+ nr_scanned += folio_nr_pages(folio) - 1;
+ folio_set_lru(folio);
goto isolate_fail_put;
}
}
- /* The whole page is taken off the LRU; skip the tail pages. */
- if (PageCompound(page))
- low_pfn += compound_nr(page) - 1;
+ /* The folio is taken off the LRU */
+ if (folio_test_large(folio))
+ low_pfn += folio_nr_pages(folio) - 1;
/* Successfully isolated */
- del_page_from_lru_list(page, lruvec);
- mod_node_page_state(page_pgdat(page),
- NR_ISOLATED_ANON + page_is_file_lru(page),
- thp_nr_pages(page));
+ lruvec_del_folio(lruvec, folio);
+ node_stat_mod_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio),
+ folio_nr_pages(folio));
isolate_success:
- list_add(&page->lru, &cc->migratepages);
+ list_add(&folio->lru, &cc->migratepages);
isolate_success_no_list:
- cc->nr_migratepages += compound_nr(page);
- nr_isolated += compound_nr(page);
- nr_scanned += compound_nr(page) - 1;
+ cc->nr_migratepages += folio_nr_pages(folio);
+ nr_isolated += folio_nr_pages(folio);
+ nr_scanned += folio_nr_pages(folio) - 1;
/*
* Avoid isolating too much unless this block is being
- * rescanned (e.g. dirty/writeback pages, parallel allocation)
+ * fully scanned (e.g. dirty/writeback pages, parallel allocation)
* or a lock is contended. For contention, isolate quickly to
* potentially remove one source of contention.
*/
if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
- !cc->rescan && !cc->contended) {
+ !cc->finish_pageblock && !cc->contended) {
++low_pfn;
break;
}
unlock_page_lruvec_irqrestore(locked, flags);
locked = NULL;
}
- put_page(page);
+ folio_put(folio);
isolate_fail:
if (!skip_on_failure && ret != -ENOMEM)
if (unlikely(low_pfn > end_pfn))
low_pfn = end_pfn;
- page = NULL;
+ folio = NULL;
isolate_abort:
if (locked)
unlock_page_lruvec_irqrestore(locked, flags);
- if (page) {
- SetPageLRU(page);
- put_page(page);
+ if (folio) {
+ folio_set_lru(folio);
+ folio_put(folio);
}
/*
- * Updated the cached scanner pfn once the pageblock has been scanned
+ * Update the cached scanner pfn once the pageblock has been scanned.
* Pages will either be migrated in which case there is no point
* scanning in the near future or migration failed in which case the
* failure reason may persist. The block is marked for skipping if
* there were no pages isolated in the block or if the block is
* rescanned twice in a row.
*/
- if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
- if (valid_page && !skip_updated)
+ if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) {
+ if (!cc->no_set_skip_hint && valid_page && !skip_updated)
set_pageblock_skip(valid_page);
update_cached_migrate(cc, low_pfn);
}
{
/* If the page is a large free page, then disallow migration */
if (PageBuddy(page)) {
+ int order = cc->order > 0 ? cc->order : pageblock_order;
+
/*
* We are checking page_order without zone->lock taken. But
* the only small danger is that we skip a potentially suitable
* pageblock, so it's not worth to check order for valid range.
*/
- if (buddy_order_unsafe(page) >= pageblock_order)
+ if (buddy_order_unsafe(page) >= order)
return false;
}
{
LIST_HEAD(sublist);
- if (!list_is_last(freelist, &freepage->lru)) {
- list_cut_before(&sublist, freelist, &freepage->lru);
+ if (!list_is_first(&freepage->buddy_list, freelist)) {
+ list_cut_before(&sublist, freelist, &freepage->buddy_list);
list_splice_tail(&sublist, freelist);
}
}
{
LIST_HEAD(sublist);
- if (!list_is_first(freelist, &freepage->lru)) {
- list_cut_position(&sublist, freelist, &freepage->lru);
+ if (!list_is_last(&freepage->buddy_list, freelist)) {
+ list_cut_position(&sublist, freelist, &freepage->buddy_list);
list_splice_tail(&sublist, freelist);
}
}
static void
-fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
+fast_isolate_around(struct compact_control *cc, unsigned long pfn)
{
unsigned long start_pfn, end_pfn;
struct page *page;
if (!page)
return;
- /* Scan before */
- if (start_pfn != pfn) {
- isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
- if (cc->nr_freepages >= cc->nr_migratepages)
- return;
- }
-
- /* Scan after */
- start_pfn = pfn + nr_isolated;
- if (start_pfn < end_pfn)
- isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);
+ isolate_freepages_block(cc, &start_pfn, end_pfn, cc->freepages, 1, false);
/* Skip this pageblock in the future as it's full or nearly full */
- if (cc->nr_freepages < cc->nr_migratepages)
+ if (start_pfn == end_pfn && !cc->no_set_skip_hint)
set_pageblock_skip(page);
}
return order;
}
-static unsigned long
-fast_isolate_freepages(struct compact_control *cc)
+static void fast_isolate_freepages(struct compact_control *cc)
{
unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);
- unsigned int nr_scanned = 0;
+ unsigned int nr_scanned = 0, total_isolated = 0;
unsigned long low_pfn, min_pfn, highest = 0;
unsigned long nr_isolated = 0;
unsigned long distance;
/* Full compaction passes in a negative order */
if (cc->order <= 0)
- return cc->free_pfn;
+ return;
/*
* If starting the scan, use a deeper search and use the highest
spin_lock_irqsave(&cc->zone->lock, flags);
freelist = &area->free_list[MIGRATE_MOVABLE];
- list_for_each_entry_reverse(freepage, freelist, lru) {
+ list_for_each_entry_reverse(freepage, freelist, buddy_list) {
unsigned long pfn;
order_scanned++;
break;
}
- /* Use a minimum pfn if a preferred one was not found */
+ /* Use a maximum candidate pfn if a preferred one was not found */
if (!page && high_pfn) {
page = pfn_to_page(high_pfn);
set_page_private(page, order);
nr_isolated = 1 << order;
nr_scanned += nr_isolated - 1;
+ total_isolated += nr_isolated;
cc->nr_freepages += nr_isolated;
- list_add_tail(&page->lru, &cc->freepages);
+ list_add_tail(&page->lru, &cc->freepages[order]);
count_compact_events(COMPACTISOLATED, nr_isolated);
} else {
/* If isolation fails, abort the search */
spin_unlock_irqrestore(&cc->zone->lock, flags);
+ /* Skip fast search if enough freepages isolated */
+ if (cc->nr_freepages >= cc->nr_migratepages)
+ break;
+
/*
* Smaller scan on next order so the total scan is related
* to freelist_scan_limit.
limit = max(1U, limit >> 1);
}
+ trace_mm_compaction_fast_isolate_freepages(min_pfn, cc->free_pfn,
+ nr_scanned, total_isolated);
+
if (!page) {
cc->fast_search_fail++;
if (scan_start) {
min(pageblock_end_pfn(min_pfn),
zone_end_pfn(cc->zone)),
cc->zone);
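+ /* Discard the fallback candidate if it is not a suitable migration target */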
+ if (page && !suitable_migration_target(cc, page))
+ page = NULL;
+
cc->free_pfn = min_pfn;
}
}
cc->total_free_scanned += nr_scanned;
if (!page)
- return cc->free_pfn;
+ return;
low_pfn = page_to_pfn(page);
- fast_isolate_around(cc, low_pfn, nr_isolated);
- return low_pfn;
+ fast_isolate_around(cc, low_pfn);
}
/*
unsigned long isolate_start_pfn; /* exact pfn we start at */
unsigned long block_end_pfn; /* end of current pageblock */
unsigned long low_pfn; /* lowest pfn scanner is able to scan */
- struct list_head *freelist = &cc->freepages;
unsigned int stride;
/* Try a small search of the free lists for a candidate */
fast_isolate_freepages(cc);
if (cc->nr_freepages)
- goto splitmap;
+ return;
/*
* Initialise the free scanner. The starting point is where we last
page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
zone);
- if (!page)
+ if (!page) {
+ unsigned long next_pfn;
+
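+ /*
+ * pageblock_pfn_to_page() failed, possibly due to an
+ * offline section; jump the free scanner down past it.
+ */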
+ next_pfn = skip_offline_sections_reverse(block_start_pfn);
+ if (next_pfn)
+ block_start_pfn = max(next_pfn, low_pfn);
+
continue;
+ }
/* Check the block is suitable for migration */
if (!suitable_migration_target(cc, page))
/* Found a block suitable for isolating free pages from. */
nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
- block_end_pfn, freelist, stride, false);
+ block_end_pfn, cc->freepages, stride, false);
/* Update the skip hint if the full pageblock was scanned */
if (isolate_start_pfn == block_end_pfn)
- update_pageblock_skip(cc, page, block_start_pfn);
+ update_pageblock_skip(cc, page, block_start_pfn -
+ pageblock_nr_pages);
/* Are enough freepages isolated? */
if (cc->nr_freepages >= cc->nr_migratepages) {
* and the loop terminated due to isolate_start_pfn < low_pfn
*/
cc->free_pfn = isolate_start_pfn;
-
-splitmap:
- /* __isolate_free_page() does not map the pages */
- split_map_pages(freelist);
}
/*
* This is a migrate-callback that "allocates" freepages by taking pages
* from the isolated freelists in the block we are migrating to.
*/
-static struct page *compaction_alloc(struct page *migratepage,
- unsigned long data)
+static struct folio *compaction_alloc(struct folio *src, unsigned long data)
{
struct compact_control *cc = (struct compact_control *)data;
+ struct folio *dst;
+ int order = folio_order(src);
+ bool has_isolated_pages = false;
+ int start_order;
struct page *freepage;
+ unsigned long size;
- if (list_empty(&cc->freepages)) {
- isolate_freepages(cc);
+again:
+ for (start_order = order; start_order < NR_PAGE_ORDERS; start_order++)
+ if (!list_empty(&cc->freepages[start_order]))
+ break;
- if (list_empty(&cc->freepages))
+ /* no free pages on any of the per-order lists */
+ if (start_order == NR_PAGE_ORDERS) {
+ if (has_isolated_pages)
return NULL;
+ isolate_freepages(cc);
+ has_isolated_pages = true;
+ goto again;
}
- freepage = list_entry(cc->freepages.next, struct page, lru);
+ freepage = list_first_entry(&cc->freepages[start_order], struct page,
+ lru);
+ size = 1 << start_order;
+
list_del(&freepage->lru);
- cc->nr_freepages--;
- return freepage;
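+ /*
+ * If the chosen free page is larger than needed, split it and
+ * return the unused buddy halves to the matching per-order lists.
+ */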
+ while (start_order > order) {
+ start_order--;
+ size >>= 1;
+
+ list_add(&freepage[size].lru, &cc->freepages[start_order]);
+ set_page_private(&freepage[size], start_order);
+ }
+ dst = (struct folio *)freepage;
+
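+ /* Prepare the page as a freshly allocated folio, compound if order > 0 */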
+ post_alloc_hook(&dst->page, order, __GFP_MOVABLE);
+ if (order)
+ prep_compound_page(&dst->page, order);
+ cc->nr_freepages -= 1 << order;
+ cc->nr_migratepages -= 1 << order;
+ return page_rmappable_folio(&dst->page);
}
/*
* freelist. All pages on the freelist are from the same zone, so there is no
* special handling needed for NUMA.
*/
-static void compaction_free(struct page *page, unsigned long data)
+static void compaction_free(struct folio *dst, unsigned long data)
{
struct compact_control *cc = (struct compact_control *)data;
+ int order = folio_order(dst);
+ struct page *page = &dst->page;
- list_add(&page->lru, &cc->freepages);
- cc->nr_freepages++;
+ if (folio_put_testzero(dst)) {
+ free_pages_prepare(page, order);
+ list_add(&dst->lru, &cc->freepages[order]);
+ cc->nr_freepages += 1 << order;
+ }
+ cc->nr_migratepages += 1 << order;
+ /*
+ * If someone else has referenced the page, we cannot take it back to
+ * our free list.
+ */
}
/* possible outcome of isolate_migratepages */
* Allow userspace to control policy on scanning the unevictable LRU for
* compactable pages.
*/
-#ifdef CONFIG_PREEMPT_RT
-int sysctl_compact_unevictable_allowed __read_mostly = 0;
-#else
-int sysctl_compact_unevictable_allowed __read_mostly = 1;
-#endif
+static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNEVICTABLE_DEFAULT;
+/*
+ * Tunable for proactive compaction. It determines how
+ * aggressively the kernel should compact memory in the
+ * background. It takes values in the range [0, 100].
+ */
+static unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
+static int sysctl_extfrag_threshold = 500;
+static int __read_mostly sysctl_compact_memory;
static inline void
update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
if (cc->ignore_skip_hint)
return pfn;
+ /*
+ * If the pageblock should be finished then do not select a different
+ * pageblock.
+ */
+ if (cc->finish_pageblock)
+ return pfn;
+
/*
* If the migrate_pfn is not at the start of a zone or the start
* of a pageblock then assume this is a continuation of a previous
spin_lock_irqsave(&cc->zone->lock, flags);
freelist = &area->free_list[MIGRATE_MOVABLE];
- list_for_each_entry(freepage, freelist, lru) {
+ list_for_each_entry(freepage, freelist, buddy_list) {
unsigned long free_pfn;
if (nr_scanned++ >= limit) {
pfn = cc->zone->zone_start_pfn;
cc->fast_search_fail = 0;
found_block = true;
- set_pageblock_skip(freepage);
break;
}
}
block_start_pfn = cc->zone->zone_start_pfn;
/*
- * fast_find_migrateblock marks a pageblock skipped so to avoid
- * the isolation_suitable check below, check whether the fast
- * search was successful.
+ * fast_find_migrateblock() has already ensured the pageblock is not
+ * marked as skipped, so check whether the fast search was successful
+ * to avoid repeating the isolation_suitable() check below.
*/
fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;
page = pageblock_pfn_to_page(block_start_pfn,
block_end_pfn, cc->zone);
- if (!page)
+ if (!page) {
+ unsigned long next_pfn;
+
+ next_pfn = skip_offline_sections(block_start_pfn);
+ if (next_pfn)
+ block_end_pfn = min(next_pfn, cc->free_pfn);
continue;
+ }
/*
* If isolation recently failed, do not retry. Only check the
* before making it "skip" so other compaction instances do
* not scan the same block.
*/
- if (IS_ALIGNED(low_pfn, pageblock_nr_pages) &&
+ if ((pageblock_aligned(low_pfn) ||
+ low_pfn == cc->zone->zone_start_pfn) &&
!fast_find_block && !isolation_suitable(cc, page))
continue;
}
/*
- * order == -1 is expected when compacting via
- * /proc/sys/vm/compact_memory
+ * Determine whether kswapd is (or recently was!) running on this node.
+ *
+ * pgdat_kswapd_lock() pins pgdat->kswapd, so a concurrent kswapd_stop() can't
+ * zero it.
*/
-static inline bool is_via_compact_memory(int order)
-{
- return order == -1;
-}
-
static bool kswapd_is_running(pg_data_t *pgdat)
{
- return pgdat->kswapd && task_is_running(pgdat->kswapd);
+ bool running;
+
+ pgdat_kswapd_lock(pgdat);
+ running = pgdat->kswapd && task_is_running(pgdat->kswapd);
+ pgdat_kswapd_unlock(pgdat);
+
+ return running;
}
/*
struct zone *zone;
zone = &pgdat->node_zones[zoneid];
+ if (!populated_zone(zone))
+ continue;
score += fragmentation_score_zone_weighted(zone);
}
return score;
}
-static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low)
+static unsigned int fragmentation_score_wmark(bool low)
{
unsigned int wmark_low;
if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat))
return false;
- wmark_high = fragmentation_score_wmark(pgdat, false);
+ wmark_high = fragmentation_score_wmark(false);
return fragmentation_score_node(pgdat) > wmark_high;
}
return COMPACT_PARTIAL_SKIPPED;
score = fragmentation_score_zone(cc->zone);
- wmark_low = fragmentation_score_wmark(pgdat, true);
+ wmark_low = fragmentation_score_wmark(true);
if (score > wmark_low)
ret = COMPACT_CONTINUE;
* migration source is unmovable/reclaimable but it's not worth
* special casing.
*/
- if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
+ if (!pageblock_aligned(cc->migrate_pfn))
return COMPACT_CONTINUE;
/* Direct compactor: Is a suitable page free? */
ret = COMPACT_NO_SUITABLE_PAGE;
- for (order = cc->order; order < MAX_ORDER; order++) {
+ for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
struct free_area *area = &cc->zone->free_area[order];
bool can_steal;
return ret;
}
-static enum compact_result __compaction_suitable(struct zone *zone, int order,
- unsigned int alloc_flags,
- int highest_zoneidx,
- unsigned long wmark_target)
+static bool __compaction_suitable(struct zone *zone, int order,
+ int highest_zoneidx,
+ unsigned long wmark_target)
{
unsigned long watermark;
-
- if (is_via_compact_memory(order))
- return COMPACT_CONTINUE;
-
- watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
- /*
- * If watermarks for high-order allocation are already met, there
- * should be no need for compaction at all.
- */
- if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
- alloc_flags))
- return COMPACT_SUCCESS;
-
/*
* Watermarks for order-0 must be met for compaction to be able to
* isolate free pages for migration targets. This means that the
watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
low_wmark_pages(zone) : min_wmark_pages(zone);
watermark += compact_gap(order);
- if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
- ALLOC_CMA, wmark_target))
- return COMPACT_SKIPPED;
-
- return COMPACT_CONTINUE;
+ return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
+ ALLOC_CMA, wmark_target);
}
/*
* compaction_suitable: Is this suitable to run compaction on this zone now?
- * Returns
- * COMPACT_SKIPPED - If there are too few free pages for compaction
- * COMPACT_SUCCESS - If the allocation would succeed without compaction
- * COMPACT_CONTINUE - If compaction should run now
*/
-enum compact_result compaction_suitable(struct zone *zone, int order,
- unsigned int alloc_flags,
- int highest_zoneidx)
+bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx)
{
- enum compact_result ret;
- int fragindex;
+ enum compact_result compact_result;
+ bool suitable;
- ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx,
- zone_page_state(zone, NR_FREE_PAGES));
+ suitable = __compaction_suitable(zone, order, highest_zoneidx,
+ zone_page_state(zone, NR_FREE_PAGES));
/*
* fragmentation index determines if allocation failures are due to
* low memory or external fragmentation
* excessive compaction for costly orders, but it should not be at the
* expense of system stability.
*/
- if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
- fragindex = fragmentation_index(zone, order);
- if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
- ret = COMPACT_NOT_SUITABLE_ZONE;
+ if (suitable) {
+ compact_result = COMPACT_CONTINUE;
+ if (order > PAGE_ALLOC_COSTLY_ORDER) {
+ int fragindex = fragmentation_index(zone, order);
+
+ if (fragindex >= 0 &&
+ fragindex <= sysctl_extfrag_threshold) {
+ suitable = false;
+ compact_result = COMPACT_NOT_SUITABLE_ZONE;
+ }
+ }
+ } else {
+ compact_result = COMPACT_SKIPPED;
}
- trace_mm_compaction_suitable(zone, order, ret);
- if (ret == COMPACT_NOT_SUITABLE_ZONE)
- ret = COMPACT_SKIPPED;
+ trace_mm_compaction_suitable(zone, order, compact_result);
- return ret;
+ return suitable;
}
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
ac->highest_zoneidx, ac->nodemask) {
unsigned long available;
- enum compact_result compact_result;
/*
* Do not consider all the reclaimable memory because we do not
*/
available = zone_reclaimable_pages(zone) / order;
available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
- compact_result = __compaction_suitable(zone, order, alloc_flags,
- ac->highest_zoneidx, available);
- if (compact_result == COMPACT_CONTINUE)
+ if (__compaction_suitable(zone, order, ac->highest_zoneidx,
+ available))
return true;
}
return false;
}
+/*
+ * Should we do compaction for the target allocation order?
+ * Return COMPACT_SUCCESS if allocation for the target order can already be
+ * satisfied
+ * Return COMPACT_SKIPPED if compaction for the target order is likely to fail
+ * Return COMPACT_CONTINUE if compaction for the target order should be run
+ */
+static enum compact_result
+compaction_suit_allocation_order(struct zone *zone, unsigned int order,
+ int highest_zoneidx, unsigned int alloc_flags)
+{
+ unsigned long watermark;
+
+ watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
+ if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
+ alloc_flags))
+ return COMPACT_SUCCESS;
+
+ if (!compaction_suitable(zone, order, highest_zoneidx))
+ return COMPACT_SKIPPED;
+
+ return COMPACT_CONTINUE;
+}
+
static enum compact_result
compact_zone(struct compact_control *cc, struct capture_control *capc)
{
unsigned long last_migrated_pfn;
const bool sync = cc->mode != MIGRATE_ASYNC;
bool update_cached;
- unsigned int nr_succeeded = 0;
+ unsigned int nr_succeeded = 0, nr_migratepages;
+ int order;
/*
* These counters track activities during zone compaction. Initialize
cc->total_free_scanned = 0;
cc->nr_migratepages = 0;
cc->nr_freepages = 0;
- INIT_LIST_HEAD(&cc->freepages);
+ for (order = 0; order < NR_PAGE_ORDERS; order++)
+ INIT_LIST_HEAD(&cc->freepages[order]);
INIT_LIST_HEAD(&cc->migratepages);
cc->migratetype = gfp_migratetype(cc->gfp_mask);
- ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
- cc->highest_zoneidx);
- /* Compaction is likely to fail */
- if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
- return ret;
- /* huh, compaction_suitable is returning something unexpected */
- VM_BUG_ON(ret != COMPACT_CONTINUE);
+ if (!is_via_compact_memory(cc->order)) {
+ ret = compaction_suit_allocation_order(cc->zone, cc->order,
+ cc->highest_zoneidx,
+ cc->alloc_flags);
+ if (ret != COMPACT_CONTINUE)
+ return ret;
+ }
/*
* Clear pageblock skip if there were failures recently and compaction
unsigned long iteration_start_pfn = cc->migrate_pfn;
/*
- * Avoid multiple rescans which can happen if a page cannot be
- * isolated (dirty/writeback in async mode) or if the migrated
- * pages are being allocated before the pageblock is cleared.
- * The first rescan will capture the entire pageblock for
- * migration. If it fails, it'll be marked skip and scanning
- * will proceed as normal.
+ * Avoid multiple rescans of the same pageblock which can
+ * happen if a page cannot be isolated (dirty/writeback in
+ * async mode) or if the migrated pages are being allocated
+ * before the pageblock is cleared. The first rescan will
+ * capture the entire pageblock for migration. If it fails,
+ * it'll be marked skip and scanning will proceed as normal.
*/
- cc->rescan = false;
+ cc->finish_pageblock = false;
if (pageblock_start_pfn(last_migrated_pfn) ==
pageblock_start_pfn(iteration_start_pfn)) {
- cc->rescan = true;
+ cc->finish_pageblock = true;
}
+rescan:
switch (isolate_migratepages(cc)) {
case ISOLATE_ABORT:
ret = COMPACT_CONTENDED;
goto check_drain;
case ISOLATE_SUCCESS:
update_cached = false;
- last_migrated_pfn = iteration_start_pfn;
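+ /*
+ * Remember which pageblock migration pages were just
+ * isolated from so pcplists can be drained once the
+ * scanner moves past it.
+ */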
+ last_migrated_pfn = max(cc->zone->zone_start_pfn,
+ pageblock_start_pfn(cc->migrate_pfn - 1));
}
+ /*
+ * Record the number of pages to migrate since
+ * compaction_alloc() and compaction_free() will update
+ * cc->nr_migratepages properly.
+ */
+ nr_migratepages = cc->nr_migratepages;
err = migrate_pages(&cc->migratepages, compaction_alloc,
compaction_free, (unsigned long)cc, cc->mode,
MR_COMPACTION, &nr_succeeded);
- trace_mm_compaction_migratepages(cc, nr_succeeded);
+ trace_mm_compaction_migratepages(nr_migratepages, nr_succeeded);
/* All pages were either migrated or will be released */
cc->nr_migratepages = 0;
goto out;
}
/*
- * We failed to migrate at least one page in the current
- * order-aligned block, so skip the rest of it.
+ * If an ASYNC or SYNC_LIGHT compaction fails to migrate
+ * a page within the pageblock_order-aligned block and
+ * fast_find_migrateblock may be used, then scan the
+ * remainder of the pageblock. This will mark the
+ * pageblock "skip" to avoid rescanning in the near
+ * future. This will isolate more pages than necessary
+ * for the request but avoid loops due to
+ * fast_find_migrateblock revisiting blocks that were
+ * recently partially scanned.
*/
- if (cc->direct_compaction &&
- (cc->mode == MIGRATE_ASYNC)) {
- cc->migrate_pfn = block_end_pfn(
- cc->migrate_pfn - 1, cc->order);
- /* Draining pcplists is useless in this case */
- last_migrated_pfn = 0;
+ if (!pageblock_aligned(cc->migrate_pfn) &&
+ !cc->ignore_skip_hint && !cc->finish_pageblock &&
+ (cc->mode < MIGRATE_SYNC)) {
+ cc->finish_pageblock = true;
+
+ /*
+ * Draining pcplists does not help THP if
+ * any page failed to migrate. Even after
+ * drain, the pageblock will not be free.
+ */
+ if (cc->order == COMPACTION_HPAGE_ORDER)
+ last_migrated_pfn = 0;
+
+ goto rescan;
}
}
+ /* Stop if a page has been captured */
+ if (capc && capc->page) {
+ ret = COMPACT_SUCCESS;
+ break;
+ }
+
check_drain:
/*
* Has the migration scanner moved away from the previous
last_migrated_pfn = 0;
}
}
-
- /* Stop if a page has been captured */
- if (capc && capc->page) {
- ret = COMPACT_SUCCESS;
- break;
- }
}
out:
* so we don't leave any returned pages behind in the next attempt.
*/
if (cc->nr_freepages > 0) {
- unsigned long free_pfn = release_freepages(&cc->freepages);
+ unsigned long free_pfn = release_free_list(cc->freepages);
cc->nr_freepages = 0;
VM_BUG_ON(free_pfn == 0);
trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret);
+ VM_BUG_ON(!list_empty(&cc->migratepages));
+
return ret;
}
ret = compact_zone(&cc, &capc);
- VM_BUG_ON(!list_empty(&cc.freepages));
- VM_BUG_ON(!list_empty(&cc.migratepages));
-
/*
* Make sure we hide capture control first before we read the captured
* page pointer, otherwise an interrupt could free and capture a page
return ret;
}
-int sysctl_extfrag_threshold = 500;
-
/**
* try_to_compact_pages - Direct compact to satisfy a high-order allocation
* @gfp_mask: The GFP mask of the current allocation
unsigned int alloc_flags, const struct alloc_context *ac,
enum compact_priority prio, struct page **capture)
{
- int may_perform_io = (__force int)(gfp_mask & __GFP_IO);
struct zoneref *z;
struct zone *zone;
enum compact_result rc = COMPACT_SKIPPED;
- /*
- * Check if the GFP flags allow compaction - GFP_NOIO is really
- * tricky context because the migration might require IO
- */
- if (!may_perform_io)
+ if (!gfp_compaction_allowed(gfp_mask))
return COMPACT_SKIPPED;
trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
}
/*
- * Compact all zones within a node till each zone's fragmentation score
- * reaches within proactive compaction thresholds (as determined by the
- * proactiveness tunable).
+ * compact_node() - compact all zones within a node
+ * @pgdat: The node page data
+ * @proactive: Whether the compaction is proactive
*
- * It is possible that the function returns before reaching score targets
- * due to various back-off conditions, such as, contention on per-node or
- * per-zone locks.
+ * For proactive compaction, compact until each zone's fragmentation score
+ * reaches within the proactive compaction thresholds (as determined by the
+ * proactiveness tunable). It is possible that the function returns before
+ * reaching the score targets due to various back-off conditions, such as
+ * contention on per-node or per-zone locks.
*/
-static void proactive_compact_node(pg_data_t *pgdat)
+static int compact_node(pg_data_t *pgdat, bool proactive)
{
int zoneid;
struct zone *zone;
struct compact_control cc = {
.order = -1,
- .mode = MIGRATE_SYNC_LIGHT,
+ .mode = proactive ? MIGRATE_SYNC_LIGHT : MIGRATE_SYNC,
.ignore_skip_hint = true,
.whole_zone = true,
.gfp_mask = GFP_KERNEL,
- .proactive_compaction = true,
+ .proactive_compaction = proactive,
};
for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
if (!populated_zone(zone))
continue;
- cc.zone = zone;
-
- compact_zone(&cc, NULL);
-
- VM_BUG_ON(!list_empty(&cc.freepages));
- VM_BUG_ON(!list_empty(&cc.migratepages));
- }
-}
-
-/* Compact all zones within a node */
-static void compact_node(int nid)
-{
- pg_data_t *pgdat = NODE_DATA(nid);
- int zoneid;
- struct zone *zone;
- struct compact_control cc = {
- .order = -1,
- .mode = MIGRATE_SYNC,
- .ignore_skip_hint = true,
- .whole_zone = true,
- .gfp_mask = GFP_KERNEL,
- };
-
-
- for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
-
- zone = &pgdat->node_zones[zoneid];
- if (!populated_zone(zone))
- continue;
+ if (fatal_signal_pending(current))
+ return -EINTR;
cc.zone = zone;
compact_zone(&cc, NULL);
- VM_BUG_ON(!list_empty(&cc.freepages));
- VM_BUG_ON(!list_empty(&cc.migratepages));
+ if (proactive) {
+ count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
+ cc.total_migrate_scanned);
+ count_compact_events(KCOMPACTD_FREE_SCANNED,
+ cc.total_free_scanned);
+ }
}
+
+ return 0;
}
-/* Compact all nodes in the system */
-static void compact_nodes(void)
+/* Compact all zones of all nodes in the system */
+static int compact_nodes(void)
{
- int nid;
+ int ret, nid;
/* Flush pending updates to the LRU lists */
lru_add_drain_all();
- for_each_online_node(nid)
- compact_node(nid);
-}
+ for_each_online_node(nid) {
+ ret = compact_node(NODE_DATA(nid), false);
+ if (ret)
+ return ret;
+ }
-/*
- * Tunable for proactive compaction. It determines how
- * aggressively the kernel should compact memory in the
- * background. It takes values in the range [0, 100].
- */
-unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
+ return 0;
+}
-int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
+static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
int rc, nid;
continue;
pgdat->proactive_compact_trigger = true;
+ trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, -1,
+ pgdat->nr_zones - 1);
wake_up_interruptible(&pgdat->kcompactd_wait);
}
}
* This is the entry point for compacting all nodes via
* /proc/sys/vm/compact_memory
*/
-int sysctl_compaction_handler(struct ctl_table *table, int write,
+static int sysctl_compaction_handler(struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
{
+ int ret;
+
+ ret = proc_dointvec(table, write, buffer, length, ppos);
+ if (ret)
+ return ret;
+
+ if (sysctl_compact_memory != 1)
+ return -EINVAL;
+
if (write)
- compact_nodes();
+ ret = compact_nodes();
- return 0;
+ return ret;
}
#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
/* Flush pending updates to the LRU lists */
lru_add_drain_all();
- compact_node(nid);
+ compact_node(NODE_DATA(nid), false);
}
return count;
void compaction_unregister_node(struct node *node)
{
- return device_remove_file(&node->dev, &dev_attr_compact);
+ device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */
int zoneid;
struct zone *zone;
enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx;
+ enum compact_result ret;
for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) {
zone = &pgdat->node_zones[zoneid];
if (!populated_zone(zone))
continue;
- if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
- highest_zoneidx) == COMPACT_CONTINUE)
+ ret = compaction_suit_allocation_order(zone,
+ pgdat->kcompactd_max_order,
+ highest_zoneidx, ALLOC_WMARK_MIN);
+ if (ret == COMPACT_CONTINUE)
return true;
}
.ignore_skip_hint = false,
.gfp_mask = GFP_KERNEL,
};
+ enum compact_result ret;
+
trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
cc.highest_zoneidx);
count_compact_event(KCOMPACTD_WAKE);
if (compaction_deferred(zone, cc.order))
continue;
- if (compaction_suitable(zone, cc.order, 0, zoneid) !=
- COMPACT_CONTINUE)
+ ret = compaction_suit_allocation_order(zone,
+ cc.order, zoneid, ALLOC_WMARK_MIN);
+ if (ret != COMPACT_CONTINUE)
continue;
if (kthread_should_stop())
cc.total_migrate_scanned);
count_compact_events(KCOMPACTD_FREE_SCANNED,
cc.total_free_scanned);
-
- VM_BUG_ON(!list_empty(&cc.freepages));
- VM_BUG_ON(!list_empty(&cc.migratepages));
}
/*
unsigned int prev_score, score;
prev_score = fragmentation_score_node(pgdat);
- proactive_compact_node(pgdat);
+ compact_node(pgdat, true);
score = fragmentation_score_node(pgdat);
/*
* Defer proactive compaction if the fragmentation
* This kcompactd start function will be called by init and node-hot-add.
* On node-hot-add, kcompactd will moved to proper cpus if cpus are hot-added.
*/
-void kcompactd_run(int nid)
+void __meminit kcompactd_run(int nid)
{
pg_data_t *pgdat = NODE_DATA(nid);
* Called by memory hotplug when all memory in a node is offlined. Caller must
* be holding mem_hotplug_begin/done().
*/
-void kcompactd_stop(int nid)
+void __meminit kcompactd_stop(int nid)
{
struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
return 0;
}
+static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table,
+ int write, void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret, old;
+
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || !write)
+ return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ old = *(int *)table->data;
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (ret)
+ return ret;
+ if (old != *(int *)table->data)
+ pr_warn_once("sysctl attribute %s changed by %s[%d]\n",
+ table->procname, current->comm,
+ task_pid_nr(current));
+ return ret;
+}
+
+static struct ctl_table vm_compaction[] = {
+ {
+ .procname = "compact_memory",
+ .data = &sysctl_compact_memory,
+ .maxlen = sizeof(int),
+ .mode = 0200,
+ .proc_handler = sysctl_compaction_handler,
+ },
+ {
+ .procname = "compaction_proactiveness",
+ .data = &sysctl_compaction_proactiveness,
+ .maxlen = sizeof(sysctl_compaction_proactiveness),
+ .mode = 0644,
+ .proc_handler = compaction_proactiveness_sysctl_handler,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE_HUNDRED,
+ },
+ {
+ .procname = "extfrag_threshold",
+ .data = &sysctl_extfrag_threshold,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE_THOUSAND,
+ },
+ {
+ .procname = "compact_unevictable_allowed",
+ .data = &sysctl_compact_unevictable_allowed,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax_warn_RT_change,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ { }
+};
+
static int __init kcompactd_init(void)
{
int nid;
for_each_node_state(nid, N_MEMORY)
kcompactd_run(nid);
+ register_sysctl_init("vm", vm_compaction);
return 0;
}
subsys_initcall(kcompactd_init)