diff --git a/mm/vmscan.c b/mm/vmscan.c
index 19b134d..5199b96 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -346,6 +346,9 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
 {
        int id, ret = -ENOMEM;
 
+       if (mem_cgroup_disabled())
+               return -ENOSYS;
+
        down_write(&shrinker_rwsem);
        /* This may call shrinker, so it must use down_read_trylock() */
        id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
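Note that -ENOSYS here is a sentinel rather than a hard failure: prealloc_shrinker(), changed further down in this patch, treats it as "per-memcg deferral unavailable" and falls back to clearing SHRINKER_MEMCG_AWARE and allocating a plain nr_deferred array.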
@@ -394,6 +397,30 @@ static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
        return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
 }
 
+void reparent_shrinker_deferred(struct mem_cgroup *memcg)
+{
+       int i, nid;
+       long nr;
+       struct mem_cgroup *parent;
+       struct shrinker_info *child_info, *parent_info;
+
+       parent = parent_mem_cgroup(memcg);
+       if (!parent)
+               parent = root_mem_cgroup;
+
+       /* Prevent concurrent shrinker_info expansion */
+       down_read(&shrinker_rwsem);
+       for_each_node(nid) {
+               child_info = shrinker_info_protected(memcg, nid);
+               parent_info = shrinker_info_protected(parent, nid);
+               for (i = 0; i < shrinker_nr_max; i++) {
+                       nr = atomic_long_read(&child_info->nr_deferred[i]);
+                       atomic_long_add(nr, &parent_info->nr_deferred[i]);
+               }
+       }
+       up_read(&shrinker_rwsem);
+}
+
 static bool cgroup_reclaim(struct scan_control *sc)
 {
        return sc->target_mem_cgroup;
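reparent_shrinker_deferred() folds a dying memcg's per-shrinker deferred counts into its parent so the backlog is not lost when the cgroup goes away. In the full series it is wired into the memcg offline path; a minimal call-site sketch (the exact placement in mm/memcontrol.c is an assumption here, not part of this hunk):

    static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
    {
            struct mem_cgroup *memcg = mem_cgroup_from_css(css);

            /* ... existing offline teardown ... */

            /* Hand any deferred shrinker work to the parent memcg
             * before this group disappears. */
            reparent_shrinker_deferred(memcg);
    }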
@@ -425,7 +452,7 @@ static bool writeback_throttling_sane(struct scan_control *sc)
 #else
 static int prealloc_memcg_shrinker(struct shrinker *shrinker)
 {
-       return 0;
+       return -ENOSYS;
 }
 
 static void unregister_memcg_shrinker(struct shrinker *shrinker)
@@ -537,8 +564,18 @@ static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
  */
 int prealloc_shrinker(struct shrinker *shrinker)
 {
-       unsigned int size = sizeof(*shrinker->nr_deferred);
+       unsigned int size;
+       int err;
+
+       if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
+               err = prealloc_memcg_shrinker(shrinker);
+               if (err != -ENOSYS)
+                       return err;
+
+               shrinker->flags &= ~SHRINKER_MEMCG_AWARE;
+       }
 
+       size = sizeof(*shrinker->nr_deferred);
        if (shrinker->flags & SHRINKER_NUMA_AWARE)
                size *= nr_node_ids;
 
@@ -546,28 +583,16 @@ int prealloc_shrinker(struct shrinker *shrinker)
        if (!shrinker->nr_deferred)
                return -ENOMEM;
 
-       if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
-               if (prealloc_memcg_shrinker(shrinker))
-                       goto free_deferred;
-       }
-
        return 0;
-
-free_deferred:
-       kfree(shrinker->nr_deferred);
-       shrinker->nr_deferred = NULL;
-       return -ENOMEM;
 }
 
 void free_prealloced_shrinker(struct shrinker *shrinker)
 {
-       if (!shrinker->nr_deferred)
-               return;
-
        if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
                down_write(&shrinker_rwsem);
                unregister_memcg_shrinker(shrinker);
                up_write(&shrinker_rwsem);
+               return;
        }
 
        kfree(shrinker->nr_deferred);
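After this change, memcg-aware shrinkers no longer own an nr_deferred array at all: their deferred counts live in per-memcg shrinker_info, so free_prealloced_shrinker() only drops the IDR slot and returns early. A hypothetical registration illustrating the two paths (the example_* names are illustrative, not from this patch):

    static struct shrinker example_shrinker = {
            .count_objects = example_count,
            .scan_objects  = example_scan,
            .seeks         = DEFAULT_SEEKS,
            .flags         = SHRINKER_MEMCG_AWARE,
    };

    int example_init(void)
    {
            /* With memcg enabled, prealloc_memcg_shrinker() succeeds and
             * no nr_deferred array is allocated; with memcg disabled it
             * returns -ENOSYS, the flag is cleared, and the plain
             * nr_deferred path is used instead. */
            int err = prealloc_shrinker(&example_shrinker);

            if (err)
                    return err;
            register_shrinker_prepared(&example_shrinker);
            return 0;
    }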
@@ -639,7 +664,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
         */
        nr = xchg_nr_deferred(shrinker, shrinkctl);
 
-       total_scan = nr;
        if (shrinker->seeks) {
                delta = freeable >> priority;
                delta *= 4;
@@ -653,37 +677,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
                delta = freeable / 2;
        }
 
+       total_scan = nr >> priority;
        total_scan += delta;
-       if (total_scan < 0) {
-               pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
-                      shrinker->scan_objects, total_scan);
-               total_scan = freeable;
-               next_deferred = nr;
-       } else
-               next_deferred = total_scan;
-
-       /*
-        * We need to avoid excessive windup on filesystem shrinkers
-        * due to large numbers of GFP_NOFS allocations causing the
-        * shrinkers to return -1 all the time. This results in a large
-        * nr being built up so when a shrink that can do some work
-        * comes along it empties the entire cache due to nr >>>
-        * freeable. This is bad for sustaining a working set in
-        * memory.
-        *
-        * Hence only allow the shrinker to scan the entire cache when
-        * a large delta change is calculated directly.
-        */
-       if (delta < freeable / 4)
-               total_scan = min(total_scan, freeable / 2);
-
-       /*
-        * Avoid risking looping forever due to too large nr value:
-        * never try to free more than twice the estimate number of
-        * freeable entries.
-        */
-       if (total_scan > freeable * 2)
-               total_scan = freeable * 2;
+       total_scan = min(total_scan, (2 * freeable));
 
        trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
                                   freeable, delta, total_scan, priority);
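A worked example of the new scan math as a standalone sketch (the input values are assumptions; DEF_PRIORITY is 12 and DEFAULT_SEEKS is 2 in this era of the kernel):

    #include <stdio.h>

    int main(void)
    {
            long freeable = 10000, nr = 8192;  /* assumed example inputs */
            int priority = 12, seeks = 2;      /* DEF_PRIORITY, DEFAULT_SEEKS */

            long delta = (freeable >> priority) * 4 / seeks;   /* 2 * 4 / 2 = 4 */
            long total_scan = (nr >> priority) + delta;        /* 2 + 4 = 6 */
            if (total_scan > 2 * freeable)                     /* cap at 20000 */
                    total_scan = 2 * freeable;

            printf("delta=%ld total_scan=%ld\n", delta, total_scan);
            return 0;
    }

The old code would have added the entire deferred nr up front (8192 + 4) and then clamped it to freeable / 2 = 5000 by the windup check, regardless of priority; the new math scans only 6 objects here and pays the backlog off proportionally as reclaim priority tightens (the priority value drops toward 0, so nr >> priority grows).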
@@ -722,10 +718,15 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
                cond_resched();
        }
 
-       if (next_deferred >= scanned)
-               next_deferred -= scanned;
-       else
-               next_deferred = 0;
+       /*
+        * The deferred work is increased by any new work (delta) that wasn't
+        * done, decreased by old deferred work that was done now.
+        *
+        * And it is capped at twice the number of freeable items.
+        */
+       next_deferred = max_t(long, (nr + delta - scanned), 0);
+       next_deferred = min(next_deferred, (2 * freeable));
+
        /*
         * move the unused scan count back into the shrinker in a
         * manner that handles concurrent updates.
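Continuing the example above: if the shrinker actually scanned those 6 objects, next_deferred = max(8192 + 4 - 6, 0) = 8190, comfortably under the 2 * freeable = 20000 cap, so nearly all of the backlog is carried forward and trickled out on later passes instead of being dumped on the cache in one go.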
@@ -4058,7 +4059,7 @@ static int kswapd(void *p)
 {
        unsigned int alloc_order, reclaim_order;
        unsigned int highest_zoneidx = MAX_NR_ZONES - 1;
-       pg_data_t *pgdat = (pg_data_t*)p;
+       pg_data_t *pgdat = (pg_data_t *)p;
        struct task_struct *tsk = current;
        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);