filemap: check compound_head(page)->mapping in pagecache_get_page()
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a6c5d0b..c27dd62 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1149,7 +1149,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                VM_BUG_ON_PAGE(PageActive(page), page);
 
-               nr_pages = 1 << compound_order(page);
+               nr_pages = compound_nr(page);
 
                /* Account the number of base pages even though THP */
                sc->nr_scanned += nr_pages;
@@ -1705,7 +1705,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
                VM_BUG_ON_PAGE(!PageLRU(page), page);
 
-               nr_pages = 1 << compound_order(page);
+               nr_pages = compound_nr(page);
                total_scan += nr_pages;
 
                if (page_zonenum(page) > sc->reclaim_idx) {
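Both hunks above make the same substitution. For context, compound_nr() was added as a thin wrapper over the open-coded expression being replaced; a sketch of the helper as I believe it reads in include/linux/mm.h around this kernel version (quoted from memory, so treat as approximate):

/* Number of base pages in a (possibly compound) page. */
static inline unsigned long compound_nr(struct page *page)
{
	return 1UL << compound_order(page);
}

The helper shifts 1UL rather than a plain int, so the result is computed in unsigned long; otherwise the conversion looks like a mechanical cleanup with no intended behaviour change.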
@@ -2586,7 +2586,6 @@ static bool in_reclaim_compaction(struct scan_control *sc)
  */
 static inline bool should_continue_reclaim(struct pglist_data *pgdat,
                                        unsigned long nr_reclaimed,
-                                       unsigned long nr_scanned,
                                        struct scan_control *sc)
 {
        unsigned long pages_for_compaction;
@@ -2597,40 +2596,18 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
        if (!in_reclaim_compaction(sc))
                return false;
 
-       /* Consider stopping depending on scan and reclaim activity */
-       if (sc->gfp_mask & __GFP_RETRY_MAYFAIL) {
-               /*
-                * For __GFP_RETRY_MAYFAIL allocations, stop reclaiming if the
-                * full LRU list has been scanned and we are still failing
-                * to reclaim pages. This full LRU scan is potentially
-                * expensive but a __GFP_RETRY_MAYFAIL caller really wants to succeed
-                */
-               if (!nr_reclaimed && !nr_scanned)
-                       return false;
-       } else {
-               /*
-                * For non-__GFP_RETRY_MAYFAIL allocations which can presumably
-                * fail without consequence, stop if we failed to reclaim
-                * any pages from the last SWAP_CLUSTER_MAX number of
-                * pages that were scanned. This will return to the
-                * caller faster at the risk reclaim/compaction and
-                * the resulting allocation attempt fails
-                */
-               if (!nr_reclaimed)
-                       return false;
-       }
-
        /*
-        * If we have not reclaimed enough pages for compaction and the
-        * inactive lists are large enough, continue reclaiming
+        * Stop if we failed to reclaim any pages from the last SWAP_CLUSTER_MAX
+        * number of pages that were scanned. This will return to the caller
+        * with the risk that reclaim/compaction and the resulting allocation
+        * attempt fail. In the past we tried harder for __GFP_RETRY_MAYFAIL
+        * allocations by requiring that the full LRU list had been scanned
+        * first, assuming that a zero delta of sc->nr_scanned meant a full
+        * LRU scan, but that approximation was wrong: there were corner cases
+        * where a non-zero number of pages was always scanned.
         */
-       pages_for_compaction = compact_gap(sc->order);
-       inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
-       if (get_nr_swap_pages() > 0)
-               inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
-       if (sc->nr_reclaimed < pages_for_compaction &&
-                       inactive_lru_pages > pages_for_compaction)
-               return true;
+       if (!nr_reclaimed)
+               return false;
 
        /* If compaction would go ahead or the allocation would succeed, stop */
        for (z = 0; z <= sc->reclaim_idx; z++) {
@@ -2647,7 +2624,17 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
                        ;
                }
        }
-       return true;
+
+       /*
+        * If we have not reclaimed enough pages for compaction and the
+        * inactive lists are large enough, continue reclaiming
+        */
+       pages_for_compaction = compact_gap(sc->order);
+       inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
+       if (get_nr_swap_pages() > 0)
+               inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
+
+       return inactive_lru_pages > pages_for_compaction;
 }
 
 static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
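Taken together, the two should_continue_reclaim() hunks leave the function with roughly the shape sketched below. This is a reconstruction from the visible +/context lines, not text from the patch: the extra local declarations and the description of the zone loop body (which the hunks leave untouched and mostly elide) are filled in from memory of the surrounding file, so treat them as approximate.

static inline bool should_continue_reclaim(struct pglist_data *pgdat,
					   unsigned long nr_reclaimed,
					   struct scan_control *sc)
{
	unsigned long pages_for_compaction;
	unsigned long inactive_lru_pages;
	int z;

	/* Only relevant to reclaim done on behalf of compaction */
	if (!in_reclaim_compaction(sc))
		return false;

	/* The last scan batch reclaimed nothing: give up */
	if (!nr_reclaimed)
		return false;

	/* If compaction would go ahead or the allocation would succeed, stop */
	for (z = 0; z <= sc->reclaim_idx; z++) {
		/* unchanged zone loop, elided by the hunk context: returns
		 * false once compaction (or the allocation itself) would
		 * already succeed in some eligible zone */
	}

	/*
	 * If we have not reclaimed enough pages for compaction and the
	 * inactive lists are large enough, continue reclaiming
	 */
	pages_for_compaction = compact_gap(sc->order);
	inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
	if (get_nr_swap_pages() > 0)
		inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);

	return inactive_lru_pages > pages_for_compaction;
}

Note the reordering: the compaction-suitability loop now runs before the inactive-list check, and the fallthrough result is the inactive_lru_pages comparison rather than an unconditional true.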
@@ -2664,10 +2651,6 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 
        do {
                struct mem_cgroup *root = sc->target_mem_cgroup;
-               struct mem_cgroup_reclaim_cookie reclaim = {
-                       .pgdat = pgdat,
-                       .priority = sc->priority,
-               };
                unsigned long node_lru_pages = 0;
                struct mem_cgroup *memcg;
 
@@ -2676,7 +2659,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
                nr_reclaimed = sc->nr_reclaimed;
                nr_scanned = sc->nr_scanned;
 
-               memcg = mem_cgroup_iter(root, NULL, &reclaim);
+               memcg = mem_cgroup_iter(root, NULL, NULL);
                do {
                        unsigned long lru_pages;
                        unsigned long reclaimed;
@@ -2719,21 +2702,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
                                   sc->nr_scanned - scanned,
                                   sc->nr_reclaimed - reclaimed);
 
-                       /*
-                        * Kswapd have to scan all memory cgroups to fulfill
-                        * the overall scan target for the node.
-                        *
-                        * Limit reclaim, on the other hand, only cares about
-                        * nr_to_reclaim pages to be reclaimed and it will
-                        * retry with decreasing priority if one round over the
-                        * whole hierarchy is not sufficient.
-                        */
-                       if (!current_is_kswapd() &&
-                                       sc->nr_reclaimed >= sc->nr_to_reclaim) {
-                               mem_cgroup_iter_break(root, memcg);
-                               break;
-                       }
-               } while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
+               } while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
 
                if (reclaim_state) {
                        sc->nr_reclaimed += reclaim_state->reclaimed_slab;
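A note on the iterator change above, since the NULL third argument is easy to miss: as I understand mem_cgroup_iter(), a non-NULL mem_cgroup_reclaim_cookie makes reclaimers share per-node iteration state, while a NULL cookie gives each caller its own complete walk of the hierarchy under root. A minimal sketch of the usage pattern, for illustration only (not part of the patch; "done" stands in for a hypothetical exit condition):

struct mem_cgroup *memcg;

/* Start an independent, full pre-order walk of the tree under @root. */
memcg = mem_cgroup_iter(root, NULL, NULL);
do {
	/* ... reclaim from this memcg's LRU lists ... */

	if (done) {
		/* Bailing out early must drop the iterator's reference. */
		mem_cgroup_iter_break(root, memcg);
		break;
	}
} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));

The early-exit branch removed by the hunk above followed this mem_cgroup_iter_break() pattern; dropping it means every reclaimer now completes the full walk.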
@@ -2810,7 +2779,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
                        wait_iff_congested(BLK_RW_ASYNC, HZ/10);
 
        } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
-                                        sc->nr_scanned - nr_scanned, sc));
+                                        sc));
 
        /*
         * Kswapd gives up on balancing particular nodes after too