Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 749d239..72da290 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -170,11 +170,6 @@ struct scan_control {
  * From 0 .. 200.  Higher means more swappy.
  */
 int vm_swappiness = 60;
-/*
- * The total number of pages which are beyond the high watermark within all
- * zones.
- */
-unsigned long vm_total_pages;
 
 static void set_task_reclaim_state(struct task_struct *task,
                                   struct reclaim_state *rs)
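
The hunk above deletes a cached global. For context, vm_total_pages was refreshed from build_all_zonelists(), essentially caching nr_free_pagecache_pages(); a sketch from memory of mm/page_alloc.c in this era (treat the exact location as an assumption):

	/* mm/page_alloc.c, build_all_zonelists() -- pre-series sketch */
	vm_total_pages = nr_free_pagecache_pages();

With the cache gone, anyone who needs the number computes it on demand instead of reading a global that may be stale.
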
@@ -915,7 +910,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                 * order to detect refaults, thus thrashing, later on.
                 *
                 * But don't store shadows in an address space that is
-                * already exiting.  This is not just an optizimation,
+                * already exiting.  This is not just an optimization,
                 * inode reclaim needs to empty out the radix tree or
                 * the nodes are lost.  Don't plant shadows behind its
                 * back.
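
Beyond the spelling fix, it may help to see the guard this comment justifies. A condensed sketch of the surrounding code in __remove_mapping() (from memory of this kernel generation; helper names such as page_is_file_lru() are assumptions about the exact vintage, not part of this hunk):

	void *shadow = NULL;

	/* only plant a shadow for file cache on a mapping that is staying */
	if (reclaimed && page_is_file_lru(page) &&
	    !mapping_exiting(mapping) && !dax_mapping(mapping))
		shadow = workingset_eviction(page, target_memcg);
	__delete_from_page_cache(page, shadow);

If a shadow were stored into an exiting mapping, inode reclaim would have to sweep it back out before the tree nodes could be freed, which is the "don't plant shadows behind its back" the comment warns about.
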
@@ -2035,7 +2030,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
 
        __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
 
-       __count_vm_events(PGREFILL, nr_scanned);
+       if (!cgroup_reclaim(sc))
+               __count_vm_events(PGREFILL, nr_scanned);
        __count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
 
        spin_unlock_irq(&pgdat->lru_lock);
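
For reference, cgroup_reclaim() just tests whether this scan_control targets a memcg; as defined earlier in this same file (sketch from memory):

	static bool cgroup_reclaim(struct scan_control *sc)
	{
		return sc->target_mem_cgroup;
	}

With the check in place, PGREFILL behaves as PGSCAN and PGSTEAL already do in this file: the node-wide /proc/vmstat counter only moves for system-level reclaim, while the memcg event just below it is always updated.
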
@@ -2331,7 +2327,8 @@ out:
                unsigned long protection;
 
                lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
-               protection = mem_cgroup_protection(memcg,
+               protection = mem_cgroup_protection(sc->target_mem_cgroup,
+                                                  memcg,
                                                   sc->memcg_low_reclaim);
 
                if (protection) {
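
The extra argument tells the protection lookup which cgroup the current reclaim is rooted at. A sketch of the reworked helper as I believe it reads in include/linux/memcontrol.h at this point (condensed; treat the exact body as an assumption):

	static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
							  struct mem_cgroup *memcg,
							  bool in_low_reclaim)
	{
		if (mem_cgroup_disabled())
			return 0;

		/* no protection is applied to the reclaim target itself */
		if (root == memcg)
			return 0;

		if (in_low_reclaim)
			return READ_ONCE(memcg->memory.emin);

		return max(READ_ONCE(memcg->memory.emin),
			   READ_ONCE(memcg->memory.elow));
	}

The root check is the point of the change: when a cgroup is itself the reclaim target (e.g. hitting its own limit), its emin/elow may still hold effective values computed for a different reclaim root, and honoring those stale numbers would skew the scan pressure computed below.
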
@@ -2619,14 +2616,15 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
                unsigned long reclaimed;
                unsigned long scanned;
 
-               switch (mem_cgroup_protected(target_memcg, memcg)) {
-               case MEMCG_PROT_MIN:
+               mem_cgroup_calculate_protection(target_memcg, memcg);
+
+               if (mem_cgroup_below_min(memcg)) {
                        /*
                         * Hard protection.
                         * If there is no reclaimable memory, OOM.
                         */
                        continue;
-               case MEMCG_PROT_LOW:
+               } else if (mem_cgroup_below_low(memcg)) {
                        /*
                         * Soft protection.
                         * Respect the protection only as long as
@@ -2638,16 +2636,6 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
                                continue;
                        }
                        memcg_memory_event(memcg, MEMCG_LOW);
-                       break;
-               case MEMCG_PROT_NONE:
-                       /*
-                        * All protection thresholds breached. We may
-                        * still choose to vary the scan pressure
-                        * applied based on by how much the cgroup in
-                        * question has exceeded its protection
-                        * thresholds (see get_scan_count).
-                        */
-                       break;
                }
 
                reclaimed = sc->nr_reclaimed;
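
These two hunks replace the tri-state mem_cgroup_protected() switch with an explicit calculate step followed by side-effect-free predicates; the old MEMCG_PROT_NONE arm disappears because "no protection" is now simply falling through both checks (the proportional-pressure comment it carried points at get_scan_count(), which still applies). The predicates compare the cached effective limits against current usage; roughly, assuming the memcontrol.h of this era:

	static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
	{
		if (mem_cgroup_disabled())
			return false;

		return READ_ONCE(memcg->memory.emin) >=
			page_counter_read(&memcg->memory);
	}

mem_cgroup_below_low() is the same shape against memory.elow. Splitting the calculation from the check means walking the hierarchy no longer mutates protection state as a side effect of asking a question.
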
@@ -3318,7 +3306,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                           bool may_swap)
 {
        unsigned long nr_reclaimed;
-       unsigned long pflags;
        unsigned int noreclaim_flag;
        struct scan_control sc = {
                .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
@@ -3339,17 +3326,12 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
        struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
 
        set_task_reclaim_state(current, &sc.reclaim_state);
-
        trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
-
-       psi_memstall_enter(&pflags);
        noreclaim_flag = memalloc_noreclaim_save();
 
        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 
        memalloc_noreclaim_restore(noreclaim_flag);
-       psi_memstall_leave(&pflags);
-
        trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
        set_task_reclaim_state(current, NULL);
 
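
Dropping psi_memstall_enter()/psi_memstall_leave() here means this entry point no longer declares a memory stall unconditionally: reclaim driven by limit tuning or proactive policy is not memory pressure in the PSI sense. Call sites where the reclaim really is allocation-driven are expected to carry the annotation themselves; a hypothetical caller-side sketch (the local names are mine):

	unsigned long pflags;

	psi_memstall_enter(&pflags);
	reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages,
						 GFP_KERNEL, true);
	psi_memstall_leave(&pflags);

This keeps PSI's pressure numbers attributable to tasks that actually stalled on memory rather than to administrative writes to cgroup limits.
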
@@ -4222,7 +4204,8 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
         * unmapped file backed pages.
         */
        if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
-           node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
+           node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
+           pgdat->min_slab_pages)
                return NODE_RECLAIM_FULL;
 
        /*
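
Last, the slab counter moves to its byte-denominated successor. At the node level the value is still maintained in whole pages; the _B suffix and the _pages accessor exist because the per-memcg view of the same item is byte-precise. A sketch of the classifier these helpers key off (my recollection of include/linux/vmstat.h here, so treat the details as assumptions):

	static __always_inline bool vmstat_item_in_bytes(int idx)
	{
		/*
		 * Node-level slab counters change in whole-page steps and
		 * are stored as pages; only the per-memcg/per-lruvec view
		 * of these items is byte-precise (slab objects share pages).
		 */
		return idx == NR_SLAB_RECLAIMABLE_B ||
		       idx == NR_SLAB_UNRECLAIMABLE_B;
	}

node_page_state_pages() is the reader permitted on such items and hands back pages, which is why the comparison against pgdat->min_slab_pages still works unit-for-unit.
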