diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5199b9696bab..4620df62f0ff 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1499,7 +1499,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			if (unlikely(PageTransHuge(page)))
 				flags |= TTU_SPLIT_HUGE_PMD;
 
-			if (!try_to_unmap(page, flags)) {
+			try_to_unmap(page, flags);
+			if (page_mapped(page)) {
 				stat->nr_unmap_fail += nr_pages;
 				if (!was_swapbacked && PageSwapBacked(page))
 					stat->nr_lazyfree_fail += nr_pages;
@@ -1701,6 +1702,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 	unsigned int nr_reclaimed;
 	struct page *page, *next;
 	LIST_HEAD(clean_pages);
+	unsigned int noreclaim_flag;
 
 	list_for_each_entry_safe(page, next, page_list, lru) {
 		if (!PageHuge(page) && page_is_file_lru(page) &&
@@ -1711,8 +1713,17 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 		}
 	}
 
+	/*
+	 * We should be safe here since we are only dealing with file pages and
+	 * we are not kswapd and therefore cannot write dirty file pages. But
+	 * call memalloc_noreclaim_save() anyway, just in case these conditions
+	 * change in the future.
+	 */
+	noreclaim_flag = memalloc_noreclaim_save();
 	nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
 					&stat, true);
+	memalloc_noreclaim_restore(noreclaim_flag);
+
 	list_splice(&clean_pages, page_list);
 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
 			    -(long)nr_reclaimed);
@@ -1810,7 +1821,7 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
 
 }
 
-/**
+/*
  * Isolating page from the lruvec to fill in @dst list by nr_to_scan times.
  *
  * lruvec->lru_lock is heavily contended.  Some of the functions that
@@ -2015,8 +2026,8 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
  *
  * Returns the number of pages moved to the given lruvec.
  */
-static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
-						     struct list_head *list)
+static unsigned int move_pages_to_lru(struct lruvec *lruvec,
+				      struct list_head *list)
 {
 	int nr_pages, nr_moved = 0;
 	LIST_HEAD(pages_to_free);
@@ -2063,7 +2074,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
 		 * All pages were isolated from the same lruvec (and isolation
 		 * inhibits memcg migration).
 		 */
-		VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
+		VM_BUG_ON_PAGE(!page_matches_lruvec(page, lruvec), page);
 		add_page_to_lru_list(page, lruvec);
 		nr_pages = thp_nr_pages(page);
 		nr_moved += nr_pages;
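
The reclaim_clean_pages_from_list() hunks above bracket the shrink_page_list() call with memalloc_noreclaim_save()/memalloc_noreclaim_restore(), so reclaim runs with PF_MEMALLOC set and any allocation it performs along the way cannot recurse back into direct reclaim. For reference, the helpers from include/linux/sched/mm.h amount to the following sketch (paraphrased from memory of this kernel generation, not a verbatim copy):

static inline unsigned int memalloc_noreclaim_save(void)
{
	/* Remember whether PF_MEMALLOC was already set, then set it. */
	unsigned int flags = current->flags & PF_MEMALLOC;

	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	/* Restore only the saved PF_MEMALLOC bit; other flags are untouched. */
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

Saving the old PF_MEMALLOC bit rather than clearing it unconditionally on restore keeps the save/restore pair nestable inside a section that already set the flag.
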
@@ -2096,7 +2107,7 @@ static int current_may_throttle(void)
  * shrink_inactive_list() is a helper for shrink_node().  It returns the number
  * of reclaimed pages
  */
-static noinline_for_stack unsigned long
+static unsigned long
 shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		     struct scan_control *sc, enum lru_list lru)
 {
@@ -2306,6 +2317,7 @@ unsigned long reclaim_pages(struct list_head *page_list)
 	LIST_HEAD(node_page_list);
 	struct reclaim_stat dummy_stat;
 	struct page *page;
+	unsigned int noreclaim_flag;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.priority = DEF_PRIORITY,
@@ -2314,6 +2326,8 @@ unsigned long reclaim_pages(struct list_head *page_list)
 		.may_swap = 1,
 	};
 
+	noreclaim_flag = memalloc_noreclaim_save();
+
 	while (!list_empty(page_list)) {
 		page = lru_to_page(page_list);
 		if (nid == NUMA_NO_NODE) {
@@ -2350,6 +2364,8 @@ unsigned long reclaim_pages(struct list_head *page_list)
 		}
 	}
 
+	memalloc_noreclaim_restore(noreclaim_flag);
+
 	return nr_reclaimed;
 }
 
@@ -3722,6 +3738,38 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
 	return sc->nr_scanned >= sc->nr_to_reclaim;
 }
 
+/* Page allocator PCP high watermark is lowered if reclaim is active. */
+static inline void
+update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active)
+{
+	int i;
+	struct zone *zone;
+
+	for (i = 0; i <= highest_zoneidx; i++) {
+		zone = pgdat->node_zones + i;
+
+		if (!managed_zone(zone))
+			continue;
+
+		if (active)
+			set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
+		else
+			clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
+	}
+}
+
+static inline void
+set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
+{
+	update_reclaim_active(pgdat, highest_zoneidx, true);
+}
+
+static inline void
+clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
+{
+	update_reclaim_active(pgdat, highest_zoneidx, false);
+}
+
 /*
  * For kswapd, balance_pgdat() will reclaim pages across a node from zones
  * that are eligible for use by the caller until at least one zone is
@@ -3774,6 +3822,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
 	boosted = nr_boost_reclaim;
 
 restart:
+	set_reclaim_active(pgdat, highest_zoneidx);
 	sc.priority = DEF_PRIORITY;
 	do {
 		unsigned long nr_reclaimed = sc.nr_reclaimed;
@@ -3907,6 +3956,8 @@ restart:
 		pgdat->kswapd_failures++;
 
 out:
+	clear_reclaim_active(pgdat, highest_zoneidx);
+
 	/* If reclaim was boosted, account for the reclaim done in this pass */
 	if (boosted) {
 		unsigned long flags;
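
The ZONE_RECLAIM_ACTIVE bit set and cleared around balance_pgdat() above is consumed on the page allocator side: while kswapd is actively reclaiming from a zone, the effective per-cpu pagelist (PCP) high watermark is clamped so that freed pages are returned to the buddy allocator sooner instead of lingering in per-cpu caches. A minimal sketch of such a consumer, modelled on mm/page_alloc.c's nr_pcp_high() from the companion series (paraphrased; the exact helper name and clamp factor there may differ):

static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone)
{
	int high = READ_ONCE(pcp->high);

	/* PCP caching is disabled entirely for this zone. */
	if (unlikely(!high))
		return 0;

	/* No reclaim running against this zone: allow the full PCP depth. */
	if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
		return high;

	/*
	 * Reclaim is active: keep only a few batches per CPU so that
	 * freed pages reach the buddy lists quickly.
	 */
	return min(READ_ONCE(pcp->batch) << 2, high);
}

Testing a zone flag, rather than taking a lock, keeps the fast free path cheap; kswapd toggles the bit only at the start and end of a balance_pgdat() pass.
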