Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi...
[linux-2.6-microblaze.git] / mm / vmscan.c
index b3829c7..650d268 100644
@@ -203,9 +203,11 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
 {
        unsigned long nr;
 
-       nr = zone_page_state_snapshot(zone, NR_ZONE_LRU_FILE);
+       nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
+               zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
        if (get_nr_swap_pages() > 0)
-               nr += zone_page_state_snapshot(zone, NR_ZONE_LRU_ANON);
+               nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
+                       zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
 
        return nr;
 }
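
zone_reclaimable_pages() now sums the split inactive/active zone counters. The _snapshot() accessors are kept deliberately; as a sketch of why (an aside based on my reading of the 4.8-era vmstat API, not part of the patch), the snapshot variant also folds in per-CPU deltas that the plain zone_page_state() reader would miss:

        /* roughly what zone_page_state_snapshot() does internally;
         * zone_page_state() stops after the first atomic read: */
        long x = atomic_long_read(&zone->vm_stat[NR_ZONE_ACTIVE_FILE]);
        int cpu;

        for_each_online_cpu(cpu)
                x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[NR_ZONE_ACTIVE_FILE];

The extra accuracy matters here because the result feeds OOM and node-unreclaimable decisions.
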
@@ -1366,6 +1368,29 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
        return ret;
 }
 
+
+/*
+ * Update LRU sizes after isolating pages. The LRU size updates must
+ * be complete before mem_cgroup_update_lru_size due to a sanity check.
+ */
+static __always_inline void update_lru_sizes(struct lruvec *lruvec,
+                       enum lru_list lru, unsigned long *nr_zone_taken,
+                       unsigned long nr_taken)
+{
+       int zid;
+
+       for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+               if (!nr_zone_taken[zid])
+                       continue;
+
+               __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
+       }
+
+#ifdef CONFIG_MEMCG
+       mem_cgroup_update_lru_size(lruvec, lru, -nr_taken);
+#endif
+}
+
 /*
  * zone_lru_lock is heavily contended.  Some of the functions that
  * shrink the lists perform better by taking out a batch of pages
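
The new update_lru_sizes() helper is invoked once per isolation batch (see the end of the isolate_lru_pages() hunk below). A sketch of the caller side, assuming the __isolate_lru_page() success path fills nr_zone_taken[] per page, which this diff does not show:

        case 0:
                nr_pages = hpage_nr_pages(page);
                nr_taken += nr_pages;
                nr_zone_taken[page_zonenum(page)] += nr_pages;
                list_move(&page->lru, dst);
                break;

        /* ...then, after the scan loop, one call settles every zone's
         * LRU size and, under CONFIG_MEMCG, the memcg-side count: */
        update_lru_sizes(lruvec, lru, nr_zone_taken, nr_taken);
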
@@ -1394,11 +1419,12 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
        struct list_head *src = &lruvec->lists[lru];
        unsigned long nr_taken = 0;
        unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
+       unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
        unsigned long scan, nr_pages;
        LIST_HEAD(pages_skipped);
 
        for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
-                                       !list_empty(src); scan++) {
+                                       !list_empty(src);) {
                struct page *page;
 
                page = lru_to_page(src);
@@ -1408,9 +1434,16 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
                if (page_zonenum(page) > sc->reclaim_idx) {
                        list_move(&page->lru, &pages_skipped);
+                       nr_skipped[page_zonenum(page)]++;
                        continue;
                }
 
+               /*
+                * Account for scanned and skipped separately to avoid the pgdat
+                * being prematurely marked unreclaimable by pgdat_reclaimable.
+                */
+               scan++;
+
                switch (__isolate_lru_page(page, mode)) {
                case 0:
                        nr_pages = hpage_nr_pages(page);
@@ -1436,18 +1469,31 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
         * scanning would soon rescan the same pages to skip and put the
         * system at risk of premature OOM.
         */
-       if (!list_empty(&pages_skipped))
+       if (!list_empty(&pages_skipped)) {
+               int zid;
+               unsigned long total_skipped = 0;
+
+               for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+                       if (!nr_skipped[zid])
+                               continue;
+
+                       __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
+                       total_skipped += nr_skipped[zid];
+               }
+
+               /*
+                * Account skipped pages as a partial scan because the pgdat
+                * may be close to unreclaimable. If the LRU list is empty,
+                * account skipped pages as a full scan.
+                */
+               scan += list_empty(src) ? total_skipped : total_skipped >> 2;
+
                list_splice(&pages_skipped, src);
+       }
        *nr_scanned = scan;
-       trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
+       trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
                                    nr_taken, mode, is_file_lru(lru));
-       for (scan = 0; scan < MAX_NR_ZONES; scan++) {
-               nr_pages = nr_zone_taken[scan];
-               if (!nr_pages)
-                       continue;
-
-               update_lru_size(lruvec, lru, scan, -nr_pages);
-       }
+       update_lru_sizes(lruvec, lru, nr_zone_taken, nr_taken);
        return nr_taken;
 }
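
A worked example of the skip accounting above: if a lowmem-constrained scan moves 128 ineligible highmem pages to pages_skipped while src still holds pages, then total_skipped is 128 and

        scan += 128 >> 2;       /* 32 pages of partial credit */

so *nr_scanned grows slowly and the node is not prematurely flagged unreclaimable merely for walking past ineligible pages. Only when src drains completely do all 128 count as scanned, since nothing eligible remains on the list.
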
 
@@ -1606,6 +1652,30 @@ static int current_may_throttle(void)
                bdi_write_congested(current->backing_dev_info);
 }
 
+static bool inactive_reclaimable_pages(struct lruvec *lruvec,
+                               struct scan_control *sc, enum lru_list lru)
+{
+       int zid;
+       struct zone *zone;
+       int file = is_file_lru(lru);
+       struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+
+       if (!global_reclaim(sc))
+               return true;
+
+       for (zid = sc->reclaim_idx; zid >= 0; zid--) {
+               zone = &pgdat->node_zones[zid];
+               if (!populated_zone(zone))
+                       continue;
+
+               if (zone_page_state_snapshot(zone, NR_ZONE_LRU_BASE +
+                               LRU_FILE * file) >= SWAP_CLUSTER_MAX)
+                       return true;
+       }
+
+       return false;
+}
+
 /*
  * shrink_inactive_list() is a helper for shrink_node().  It returns the number
  * of reclaimed pages
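
The counter arithmetic in inactive_reclaimable_pages() above leans on the zone LRU items mirroring the enum lru_list layout (LRU_FILE == 2, inactive counters first); under that assumption, which this diff does not show, the expression resolves as:

        /* file == 0: NR_ZONE_LRU_BASE + 0 -> NR_ZONE_INACTIVE_ANON
         * file == 1: NR_ZONE_LRU_BASE + 2 -> NR_ZONE_INACTIVE_FILE
         * i.e. "does any zone eligible under sc->reclaim_idx still
         * have >= SWAP_CLUSTER_MAX pages on the matching inactive
         * list?" If not, shrink_inactive_list() below bails early. */
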
@@ -1628,6 +1698,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);
        struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 
+       if (!inactive_reclaimable_pages(lruvec, sc, lru))
+               return 0;
+
        while (unlikely(too_many_isolated(pgdat, file, sc))) {
                congestion_wait(BLK_RW_ASYNC, HZ/10);
 
@@ -1934,12 +2007,15 @@ static void shrink_active_list(unsigned long nr_to_scan,
  *    1TB     101        10GB
  *   10TB     320        32GB
  */
-static bool inactive_list_is_low(struct lruvec *lruvec, bool file)
+static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
+                                               struct scan_control *sc)
 {
        unsigned long inactive_ratio;
        unsigned long inactive;
        unsigned long active;
        unsigned long gb;
+       struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+       int zid;
 
        /*
         * If we don't have swap space, anonymous page deactivation
@@ -1951,6 +2027,27 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file)
        inactive = lruvec_lru_size(lruvec, file * LRU_FILE);
        active = lruvec_lru_size(lruvec, file * LRU_FILE + LRU_ACTIVE);
 
+       /*
+        * For zone-constrained allocations, it is necessary to check if
+        * deactivations are required for lowmem to be reclaimed. This
+        * calculates the inactive/active pages available in eligible zones.
+        */
+       for (zid = sc->reclaim_idx + 1; zid < MAX_NR_ZONES; zid++) {
+               struct zone *zone = &pgdat->node_zones[zid];
+               unsigned long inactive_zone, active_zone;
+
+               if (!populated_zone(zone))
+                       continue;
+
+               inactive_zone = zone_page_state(zone,
+                               NR_ZONE_LRU_BASE + (file * LRU_FILE));
+               active_zone = zone_page_state(zone,
+                               NR_ZONE_LRU_BASE + (file * LRU_FILE) + LRU_ACTIVE);
+
+               inactive -= min(inactive, inactive_zone);
+               active -= min(active, active_zone);
+       }
+
        gb = (inactive + active) >> (30 - PAGE_SHIFT);
        if (gb)
                inactive_ratio = int_sqrt(10 * gb);
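
The loop above subtracts LRU pages sitting in zones higher than sc->reclaim_idx, so the ratio is computed only over pages the constrained allocation could actually use. Working the "1TB" row of the table above, assuming 4K pages (PAGE_SHIFT == 12):

        gb = (inactive + active) >> (30 - 12);  /* 2^28 pages -> 1024 */
        inactive_ratio = int_sqrt(10 * 1024);   /* int_sqrt(10240) == 101 */

which reproduces the documented target ratio of 101, i.e. roughly 10GB of inactive list out of a 1TB LRU.
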
@@ -1964,7 +2061,7 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
                                 struct lruvec *lruvec, struct scan_control *sc)
 {
        if (is_active_lru(lru)) {
-               if (inactive_list_is_low(lruvec, is_file_lru(lru)))
+               if (inactive_list_is_low(lruvec, is_file_lru(lru), sc))
                        shrink_active_list(nr_to_scan, lruvec, sc, lru);
                return 0;
        }
@@ -2095,7 +2192,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
         * lruvec even if it has plenty of old anonymous pages unless the
         * system is under heavy pressure.
         */
-       if (!inactive_list_is_low(lruvec, true) &&
+       if (!inactive_list_is_low(lruvec, true, sc) &&
            lruvec_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
                scan_balance = SCAN_FILE;
                goto out;
@@ -2337,7 +2434,7 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
-       if (inactive_list_is_low(lruvec, false))
+       if (inactive_list_is_low(lruvec, false, sc))
                shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
                                   sc, LRU_ACTIVE_ANON);
 
@@ -2584,9 +2681,6 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        sc->reclaim_idx, sc->nodemask) {
-               if (!populated_zone(zone))
-                       continue;
-
                /*
                 * Take care that memory controller reclaiming has only a
                 * small influence on the global LRU.
@@ -2680,7 +2774,7 @@ retry:
        delayacct_freepages_start();
 
        if (global_reclaim(sc))
-               count_vm_event(ALLOCSTALL);
+               __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);
 
        do {
                vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
@@ -2889,7 +2983,8 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 
        trace_mm_vmscan_direct_reclaim_begin(order,
                                sc.may_writepage,
-                               gfp_mask);
+                               gfp_mask,
+                               sc.reclaim_idx);
 
        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 
@@ -2920,7 +3015,8 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
 
        trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
                                                      sc.may_writepage,
-                                                     sc.gfp_mask);
+                                                     sc.gfp_mask,
+                                                     sc.reclaim_idx);
 
        /*
         * NOTE: Although we can get the priority field, using it
@@ -2968,7 +3064,8 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 
        trace_mm_vmscan_memcg_reclaim_begin(0,
                                            sc.may_writepage,
-                                           sc.gfp_mask);
+                                           sc.gfp_mask,
+                                           sc.reclaim_idx);
 
        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 
@@ -2990,7 +3087,7 @@ static void age_active_anon(struct pglist_data *pgdat,
        do {
                struct lruvec *lruvec = mem_cgroup_lruvec(pgdat, memcg);
 
-               if (inactive_list_is_low(lruvec, false))
+               if (inactive_list_is_low(lruvec, false, sc))
                        shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
                                           sc, LRU_ACTIVE_ANON);
 
@@ -3386,7 +3483,8 @@ kswapd_try_sleep:
                 * but kcompactd is woken to compact for the original
                 * request (alloc_order).
                 */
-               trace_mm_vmscan_kswapd_wake(pgdat->node_id, alloc_order);
+               trace_mm_vmscan_kswapd_wake(pgdat->node_id, classzone_idx,
+                                               alloc_order);
                reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx);
                if (reclaim_order < alloc_order)
                        goto kswapd_try_sleep;
@@ -3752,24 +3850,23 @@ int page_evictable(struct page *page)
 void check_move_unevictable_pages(struct page **pages, int nr_pages)
 {
        struct lruvec *lruvec;
-       struct zone *zone = NULL;
+       struct pglist_data *pgdat = NULL;
        int pgscanned = 0;
        int pgrescued = 0;
        int i;
 
        for (i = 0; i < nr_pages; i++) {
                struct page *page = pages[i];
-               struct zone *pagezone;
+               struct pglist_data *pagepgdat = page_pgdat(page);
 
                pgscanned++;
-               pagezone = page_zone(page);
-               if (pagezone != zone) {
-                       if (zone)
-                               spin_unlock_irq(zone_lru_lock(zone));
-                       zone = pagezone;
-                       spin_lock_irq(zone_lru_lock(zone));
+               if (pagepgdat != pgdat) {
+                       if (pgdat)
+                               spin_unlock_irq(&pgdat->lru_lock);
+                       pgdat = pagepgdat;
+                       spin_lock_irq(&pgdat->lru_lock);
                }
-               lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
+               lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
                if (!PageLRU(page) || !PageUnevictable(page))
                        continue;
@@ -3785,10 +3882,10 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
                }
        }
 
-       if (zone) {
+       if (pgdat) {
                __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
                __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
-               spin_unlock_irq(zone_lru_lock(zone));
+               spin_unlock_irq(&pgdat->lru_lock);
        }
 }
 #endif /* CONFIG_SHMEM */
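
Reassembled from the +/- lines of the final hunks, the reworked check_move_unevictable_pages() loop is the usual lock-batching pattern, now on the per-node lru_lock rather than the per-zone one:

        struct pglist_data *pgdat = NULL;

        for (i = 0; i < nr_pages; i++) {
                struct page *page = pages[i];
                struct pglist_data *pagepgdat = page_pgdat(page);

                pgscanned++;
                if (pagepgdat != pgdat) {       /* crossed a node boundary */
                        if (pgdat)
                                spin_unlock_irq(&pgdat->lru_lock);
                        pgdat = pagepgdat;
                        spin_lock_irq(&pgdat->lru_lock);
                }
                /* ...rescue PageUnevictable pages under the lock... */
        }
        if (pgdat) {
                __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
                __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
                spin_unlock_irq(&pgdat->lru_lock);
        }

The lock is taken once per run of same-node pages instead of once per page, which is the point of carrying pgdat across iterations.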