diff --git a/mm/slab_common.c b/mm/slab_common.c
index fe8b684..f9ccd5d 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -26,6 +26,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/kmem.h>
 
+#include "internal.h"
+
 #include "slab.h"
 
 enum slab_state slab_state;
@@ -128,152 +130,6 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
        return i;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
-
-LIST_HEAD(slab_root_caches);
-static DEFINE_SPINLOCK(memcg_kmem_wq_lock);
-
-static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref);
-
-void slab_init_memcg_params(struct kmem_cache *s)
-{
-       s->memcg_params.root_cache = NULL;
-       RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
-       INIT_LIST_HEAD(&s->memcg_params.children);
-       s->memcg_params.dying = false;
-}
-
-static int init_memcg_params(struct kmem_cache *s,
-                            struct kmem_cache *root_cache)
-{
-       struct memcg_cache_array *arr;
-
-       if (root_cache) {
-               int ret = percpu_ref_init(&s->memcg_params.refcnt,
-                                         kmemcg_cache_shutdown,
-                                         0, GFP_KERNEL);
-               if (ret)
-                       return ret;
-
-               s->memcg_params.root_cache = root_cache;
-               INIT_LIST_HEAD(&s->memcg_params.children_node);
-               INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
-               return 0;
-       }
-
-       slab_init_memcg_params(s);
-
-       if (!memcg_nr_cache_ids)
-               return 0;
-
-       arr = kvzalloc(sizeof(struct memcg_cache_array) +
-                      memcg_nr_cache_ids * sizeof(void *),
-                      GFP_KERNEL);
-       if (!arr)
-               return -ENOMEM;
-
-       RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
-       return 0;
-}
-
-static void destroy_memcg_params(struct kmem_cache *s)
-{
-       if (is_root_cache(s)) {
-               kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
-       } else {
-               mem_cgroup_put(s->memcg_params.memcg);
-               WRITE_ONCE(s->memcg_params.memcg, NULL);
-               percpu_ref_exit(&s->memcg_params.refcnt);
-       }
-}
-
-static void free_memcg_params(struct rcu_head *rcu)
-{
-       struct memcg_cache_array *old;
-
-       old = container_of(rcu, struct memcg_cache_array, rcu);
-       kvfree(old);
-}
-
-static int update_memcg_params(struct kmem_cache *s, int new_array_size)
-{
-       struct memcg_cache_array *old, *new;
-
-       new = kvzalloc(sizeof(struct memcg_cache_array) +
-                      new_array_size * sizeof(void *), GFP_KERNEL);
-       if (!new)
-               return -ENOMEM;
-
-       old = rcu_dereference_protected(s->memcg_params.memcg_caches,
-                                       lockdep_is_held(&slab_mutex));
-       if (old)
-               memcpy(new->entries, old->entries,
-                      memcg_nr_cache_ids * sizeof(void *));
-
-       rcu_assign_pointer(s->memcg_params.memcg_caches, new);
-       if (old)
-               call_rcu(&old->rcu, free_memcg_params);
-       return 0;
-}
-
-int memcg_update_all_caches(int num_memcgs)
-{
-       struct kmem_cache *s;
-       int ret = 0;
-
-       mutex_lock(&slab_mutex);
-       list_for_each_entry(s, &slab_root_caches, root_caches_node) {
-               ret = update_memcg_params(s, num_memcgs);
-               /*
-                * Instead of freeing the memory, we'll just leave the caches
-                * up to this point in an updated state.
-                */
-               if (ret)
-                       break;
-       }
-       mutex_unlock(&slab_mutex);
-       return ret;
-}
-
-void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg)
-{
-       if (is_root_cache(s)) {
-               list_add(&s->root_caches_node, &slab_root_caches);
-       } else {
-               css_get(&memcg->css);
-               s->memcg_params.memcg = memcg;
-               list_add(&s->memcg_params.children_node,
-                        &s->memcg_params.root_cache->memcg_params.children);
-               list_add(&s->memcg_params.kmem_caches_node,
-                        &s->memcg_params.memcg->kmem_caches);
-       }
-}
-
-static void memcg_unlink_cache(struct kmem_cache *s)
-{
-       if (is_root_cache(s)) {
-               list_del(&s->root_caches_node);
-       } else {
-               list_del(&s->memcg_params.children_node);
-               list_del(&s->memcg_params.kmem_caches_node);
-       }
-}
-#else
-static inline int init_memcg_params(struct kmem_cache *s,
-                                   struct kmem_cache *root_cache)
-{
-       return 0;
-}
-
-static inline void destroy_memcg_params(struct kmem_cache *s)
-{
-}
-
-static inline void memcg_unlink_cache(struct kmem_cache *s)
-{
-}
-#endif /* CONFIG_MEMCG_KMEM */
-
 /*
  * Figure out what the alignment of the objects will be given a set of
  * flags, a user specified alignment and the size of the objects.
@@ -311,9 +167,6 @@ int slab_unmergeable(struct kmem_cache *s)
        if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
                return 1;
 
-       if (!is_root_cache(s))
-               return 1;
-
        if (s->ctor)
                return 1;
 
@@ -326,14 +179,6 @@ int slab_unmergeable(struct kmem_cache *s)
        if (s->refcount < 0)
                return 1;
 
-#ifdef CONFIG_MEMCG_KMEM
-       /*
-        * Skip the dying kmem_cache.
-        */
-       if (s->memcg_params.dying)
-               return 1;
-#endif
-
        return 0;
 }
 
@@ -356,7 +201,7 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
        if (flags & SLAB_NEVER_MERGE)
                return NULL;
 
-       list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) {
+       list_for_each_entry_reverse(s, &slab_caches, list) {
                if (slab_unmergeable(s))
                        continue;
 
@@ -388,7 +233,7 @@ static struct kmem_cache *create_cache(const char *name,
                unsigned int object_size, unsigned int align,
                slab_flags_t flags, unsigned int useroffset,
                unsigned int usersize, void (*ctor)(void *),
-               struct mem_cgroup *memcg, struct kmem_cache *root_cache)
+               struct kmem_cache *root_cache)
 {
        struct kmem_cache *s;
        int err;
@@ -408,24 +253,18 @@ static struct kmem_cache *create_cache(const char *name,
        s->useroffset = useroffset;
        s->usersize = usersize;
 
-       err = init_memcg_params(s, root_cache);
-       if (err)
-               goto out_free_cache;
-
        err = __kmem_cache_create(s, flags);
        if (err)
                goto out_free_cache;
 
        s->refcount = 1;
        list_add(&s->list, &slab_caches);
-       memcg_link_cache(s, memcg);
 out:
        if (err)
                return ERR_PTR(err);
        return s;
 
 out_free_cache:
-       destroy_memcg_params(s);
        kmem_cache_free(kmem_cache, s);
        goto out;
 }
@@ -471,7 +310,6 @@ kmem_cache_create_usercopy(const char *name,
 
        get_online_cpus();
        get_online_mems();
-       memcg_get_cache_ids();
 
        mutex_lock(&slab_mutex);
 
@@ -512,7 +350,7 @@ kmem_cache_create_usercopy(const char *name,
 
        s = create_cache(cache_name, size,
                         calculate_alignment(flags, align, size),
-                        flags, useroffset, usersize, ctor, NULL, NULL);
+                        flags, useroffset, usersize, ctor, NULL);
        if (IS_ERR(s)) {
                err = PTR_ERR(s);
                kfree_const(cache_name);
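
For context, a minimal sketch of a kmem_cache_create_usercopy() caller (the struct, cache, and function names here are hypothetical, not from this patch): only the [useroffset, useroffset + usersize) window of each object may be copied to or from userspace under hardened usercopy.

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>

struct demo_obj {
	spinlock_t lock;		/* never exposed to userspace */
	char payload[64];		/* the only user-copyable region */
};

static struct kmem_cache *demo_cachep;

static int __init demo_cache_init(void)
{
	demo_cachep = kmem_cache_create_usercopy("demo_cache",
			sizeof(struct demo_obj), 0, SLAB_HWCACHE_ALIGN,
			offsetof(struct demo_obj, payload),
			sizeof_field(struct demo_obj, payload), NULL);
	return demo_cachep ? 0 : -ENOMEM;
}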
@@ -521,7 +359,6 @@ kmem_cache_create_usercopy(const char *name,
 out_unlock:
        mutex_unlock(&slab_mutex);
 
-       memcg_put_cache_ids();
        put_online_mems();
        put_online_cpus();
 
@@ -582,7 +419,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
        /*
         * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
         * @slab_caches_to_rcu_destroy list.  The slab pages are freed
-        * through RCU and and the associated kmem_cache are dereferenced
+        * through RCU and the associated kmem_cache are dereferenced
         * while freeing the pages, so the kmem_caches should be freed only
         * after the pending RCU operations are finished.  As rcu_barrier()
         * is a pretty slow operation, we batch all pending destructions
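
A hedged illustration of the kind of cache this batching applies to (cache and function names hypothetical): destroying a cache created with SLAB_TYPESAFE_BY_RCU is deferred to the work item above so that one rcu_barrier() can cover several pending destructions.

#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *rcu_demo_cachep;

static int __init rcu_demo_init(void)
{
	/* objects may still be read under RCU after kmem_cache_free() */
	rcu_demo_cachep = kmem_cache_create("rcu_demo", 128, 0,
					    SLAB_TYPESAFE_BY_RCU, NULL);
	return rcu_demo_cachep ? 0 : -ENOMEM;
}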
@@ -614,7 +451,6 @@ static int shutdown_cache(struct kmem_cache *s)
        if (__kmem_cache_shutdown(s) != 0)
                return -EBUSY;
 
-       memcg_unlink_cache(s);
        list_del(&s->list);
 
        if (s->flags & SLAB_TYPESAFE_BY_RCU) {
@@ -635,311 +471,9 @@ static int shutdown_cache(struct kmem_cache *s)
        return 0;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
-/*
- * memcg_create_kmem_cache - Create a cache for a memory cgroup.
- * @memcg: The memory cgroup the new cache is for.
- * @root_cache: The parent of the new cache.
- *
- * This function attempts to create a kmem cache that will serve allocation
- * requests going from @memcg to @root_cache. The new cache inherits properties
- * from its parent.
- */
-void memcg_create_kmem_cache(struct mem_cgroup *memcg,
-                            struct kmem_cache *root_cache)
-{
-       static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
-       struct cgroup_subsys_state *css = &memcg->css;
-       struct memcg_cache_array *arr;
-       struct kmem_cache *s = NULL;
-       char *cache_name;
-       int idx;
-
-       get_online_cpus();
-       get_online_mems();
-
-       mutex_lock(&slab_mutex);
-
-       /*
-        * The memory cgroup could have been offlined while the cache
-        * creation work was pending.
-        */
-       if (memcg->kmem_state != KMEM_ONLINE)
-               goto out_unlock;
-
-       idx = memcg_cache_id(memcg);
-       arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
-                                       lockdep_is_held(&slab_mutex));
-
-       /*
-        * Since per-memcg caches are created asynchronously on first
-        * allocation (see memcg_kmem_get_cache()), several threads can try to
-        * create the same cache, but only one of them may succeed.
-        */
-       if (arr->entries[idx])
-               goto out_unlock;
-
-       cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
-       cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
-                              css->serial_nr, memcg_name_buf);
-       if (!cache_name)
-               goto out_unlock;
-
-       s = create_cache(cache_name, root_cache->object_size,
-                        root_cache->align,
-                        root_cache->flags & CACHE_CREATE_MASK,
-                        root_cache->useroffset, root_cache->usersize,
-                        root_cache->ctor, memcg, root_cache);
-       /*
-        * If we could not create a memcg cache, do not complain, because
-        * that's not critical at all as we can always proceed with the root
-        * cache.
-        */
-       if (IS_ERR(s)) {
-               kfree(cache_name);
-               goto out_unlock;
-       }
-
-       /*
-        * Since readers won't lock (see memcg_kmem_get_cache()), we need a
-        * barrier here to ensure nobody will see the kmem_cache partially
-        * initialized.
-        */
-       smp_wmb();
-       arr->entries[idx] = s;
-
-out_unlock:
-       mutex_unlock(&slab_mutex);
-
-       put_online_mems();
-       put_online_cpus();
-}
-
-static void kmemcg_workfn(struct work_struct *work)
-{
-       struct kmem_cache *s = container_of(work, struct kmem_cache,
-                                           memcg_params.work);
-
-       get_online_cpus();
-       get_online_mems();
-
-       mutex_lock(&slab_mutex);
-       s->memcg_params.work_fn(s);
-       mutex_unlock(&slab_mutex);
-
-       put_online_mems();
-       put_online_cpus();
-}
-
-static void kmemcg_rcufn(struct rcu_head *head)
-{
-       struct kmem_cache *s = container_of(head, struct kmem_cache,
-                                           memcg_params.rcu_head);
-
-       /*
-        * We need to grab blocking locks.  Bounce to ->work.  The
-        * work item shares the space with the RCU head and can't be
-        * initialized earlier.
-        */
-       INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
-       queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
-}
-
-static void kmemcg_cache_shutdown_fn(struct kmem_cache *s)
-{
-       WARN_ON(shutdown_cache(s));
-}
-
-static void kmemcg_cache_shutdown(struct percpu_ref *percpu_ref)
-{
-       struct kmem_cache *s = container_of(percpu_ref, struct kmem_cache,
-                                           memcg_params.refcnt);
-       unsigned long flags;
-
-       spin_lock_irqsave(&memcg_kmem_wq_lock, flags);
-       if (s->memcg_params.root_cache->memcg_params.dying)
-               goto unlock;
-
-       s->memcg_params.work_fn = kmemcg_cache_shutdown_fn;
-       INIT_WORK(&s->memcg_params.work, kmemcg_workfn);
-       queue_work(memcg_kmem_cache_wq, &s->memcg_params.work);
-
-unlock:
-       spin_unlock_irqrestore(&memcg_kmem_wq_lock, flags);
-}
-
-static void kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s)
-{
-       __kmemcg_cache_deactivate_after_rcu(s);
-       percpu_ref_kill(&s->memcg_params.refcnt);
-}
-
-static void kmemcg_cache_deactivate(struct kmem_cache *s)
-{
-       if (WARN_ON_ONCE(is_root_cache(s)))
-               return;
-
-       __kmemcg_cache_deactivate(s);
-       s->flags |= SLAB_DEACTIVATED;
-
-       /*
-        * memcg_kmem_wq_lock is used to synchronize memcg_params.dying
-        * flag and make sure that no new kmem_cache deactivation tasks
-        * are queued (see flush_memcg_workqueue() ).
-        */
-       spin_lock_irq(&memcg_kmem_wq_lock);
-       if (s->memcg_params.root_cache->memcg_params.dying)
-               goto unlock;
-
-       s->memcg_params.work_fn = kmemcg_cache_deactivate_after_rcu;
-       call_rcu(&s->memcg_params.rcu_head, kmemcg_rcufn);
-unlock:
-       spin_unlock_irq(&memcg_kmem_wq_lock);
-}
-
-void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg,
-                                 struct mem_cgroup *parent)
-{
-       int idx;
-       struct memcg_cache_array *arr;
-       struct kmem_cache *s, *c;
-       unsigned int nr_reparented;
-
-       idx = memcg_cache_id(memcg);
-
-       get_online_cpus();
-       get_online_mems();
-
-       mutex_lock(&slab_mutex);
-       list_for_each_entry(s, &slab_root_caches, root_caches_node) {
-               arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
-                                               lockdep_is_held(&slab_mutex));
-               c = arr->entries[idx];
-               if (!c)
-                       continue;
-
-               kmemcg_cache_deactivate(c);
-               arr->entries[idx] = NULL;
-       }
-       nr_reparented = 0;
-       list_for_each_entry(s, &memcg->kmem_caches,
-                           memcg_params.kmem_caches_node) {
-               WRITE_ONCE(s->memcg_params.memcg, parent);
-               css_put(&memcg->css);
-               nr_reparented++;
-       }
-       if (nr_reparented) {
-               list_splice_init(&memcg->kmem_caches,
-                                &parent->kmem_caches);
-               css_get_many(&parent->css, nr_reparented);
-       }
-       mutex_unlock(&slab_mutex);
-
-       put_online_mems();
-       put_online_cpus();
-}
-
-static int shutdown_memcg_caches(struct kmem_cache *s)
-{
-       struct memcg_cache_array *arr;
-       struct kmem_cache *c, *c2;
-       LIST_HEAD(busy);
-       int i;
-
-       BUG_ON(!is_root_cache(s));
-
-       /*
-        * First, shutdown active caches, i.e. caches that belong to online
-        * memory cgroups.
-        */
-       arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
-                                       lockdep_is_held(&slab_mutex));
-       for_each_memcg_cache_index(i) {
-               c = arr->entries[i];
-               if (!c)
-                       continue;
-               if (shutdown_cache(c))
-                       /*
-                        * The cache still has objects. Move it to a temporary
-                        * list so as not to try to destroy it for a second
-                        * time while iterating over inactive caches below.
-                        */
-                       list_move(&c->memcg_params.children_node, &busy);
-               else
-                       /*
-                        * The cache is empty and will be destroyed soon. Clear
-                        * the pointer to it in the memcg_caches array so that
-                        * it will never be accessed even if the root cache
-                        * stays alive.
-                        */
-                       arr->entries[i] = NULL;
-       }
-
-       /*
-        * Second, shutdown all caches left from memory cgroups that are now
-        * offline.
-        */
-       list_for_each_entry_safe(c, c2, &s->memcg_params.children,
-                                memcg_params.children_node)
-               shutdown_cache(c);
-
-       list_splice(&busy, &s->memcg_params.children);
-
-       /*
-        * A cache being destroyed must be empty. In particular, this means
-        * that all per memcg caches attached to it must be empty too.
-        */
-       if (!list_empty(&s->memcg_params.children))
-               return -EBUSY;
-       return 0;
-}
-
-static void memcg_set_kmem_cache_dying(struct kmem_cache *s)
-{
-       spin_lock_irq(&memcg_kmem_wq_lock);
-       s->memcg_params.dying = true;
-       spin_unlock_irq(&memcg_kmem_wq_lock);
-}
-
-static void flush_memcg_workqueue(struct kmem_cache *s)
-{
-       /*
-        * SLAB and SLUB deactivate the kmem_caches through call_rcu. Make
-        * sure all registered rcu callbacks have been invoked.
-        */
-       rcu_barrier();
-
-       /*
-        * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
-        * deactivates the memcg kmem_caches through workqueue. Make sure all
-        * previous workitems on workqueue are processed.
-        */
-       if (likely(memcg_kmem_cache_wq))
-               flush_workqueue(memcg_kmem_cache_wq);
-
-       /*
-        * If we're racing with children kmem_cache deactivation, it might
-        * take another rcu grace period to complete their destruction.
-        * At this moment the corresponding percpu_ref_kill() call should be
-        * done, but it might take another rcu grace period to complete
-        * switching to the atomic mode.
-        * Please, note that we check without grabbing the slab_mutex. It's safe
-        * because at this moment the children list can't grow.
-        */
-       if (!list_empty(&s->memcg_params.children))
-               rcu_barrier();
-}
-#else
-static inline int shutdown_memcg_caches(struct kmem_cache *s)
-{
-       return 0;
-}
-#endif /* CONFIG_MEMCG_KMEM */
-
 void slab_kmem_cache_release(struct kmem_cache *s)
 {
        __kmem_cache_release(s);
-       destroy_memcg_params(s);
        kfree_const(s->name);
        kmem_cache_free(kmem_cache, s);
 }
@@ -960,26 +494,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
        if (s->refcount)
                goto out_unlock;
 
-#ifdef CONFIG_MEMCG_KMEM
-       memcg_set_kmem_cache_dying(s);
-
-       mutex_unlock(&slab_mutex);
-
-       put_online_mems();
-       put_online_cpus();
-
-       flush_memcg_workqueue(s);
-
-       get_online_cpus();
-       get_online_mems();
-
-       mutex_lock(&slab_mutex);
-#endif
-
-       err = shutdown_memcg_caches(s);
-       if (!err)
-               err = shutdown_cache(s);
-
+       err = shutdown_cache(s);
        if (err) {
                pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
                       s->name);
@@ -1016,43 +531,6 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-/**
- * kmem_cache_shrink_all - shrink a cache and all memcg caches for root cache
- * @s: The cache pointer
- */
-void kmem_cache_shrink_all(struct kmem_cache *s)
-{
-       struct kmem_cache *c;
-
-       if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || !is_root_cache(s)) {
-               kmem_cache_shrink(s);
-               return;
-       }
-
-       get_online_cpus();
-       get_online_mems();
-       kasan_cache_shrink(s);
-       __kmem_cache_shrink(s);
-
-       /*
-        * We have to take the slab_mutex to protect from the memcg list
-        * modification.
-        */
-       mutex_lock(&slab_mutex);
-       for_each_memcg_cache(c, s) {
-               /*
-                * Don't need to shrink deactivated memcg caches.
-                */
-               if (s->flags & SLAB_DEACTIVATED)
-                       continue;
-               kasan_cache_shrink(c);
-               __kmem_cache_shrink(c);
-       }
-       mutex_unlock(&slab_mutex);
-       put_online_mems();
-       put_online_cpus();
-}
-
 bool slab_is_available(void)
 {
        return slab_state >= UP;
@@ -1081,8 +559,6 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
        s->useroffset = useroffset;
        s->usersize = usersize;
 
-       slab_init_memcg_params(s);
-
        err = __kmem_cache_create(s, flags);
 
        if (err)
@@ -1103,7 +579,6 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,
 
        create_boot_cache(s, name, size, flags, useroffset, usersize);
        list_add(&s->list, &slab_caches);
-       memcg_link_cache(s, NULL);
        s->refcount = 1;
        return s;
 }
@@ -1332,6 +807,18 @@ void __init create_kmalloc_caches(slab_flags_t flags)
 }
 #endif /* !CONFIG_SLOB */
 
+gfp_t kmalloc_fix_flags(gfp_t flags)
+{
+       gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
+
+       flags &= ~GFP_SLAB_BUG_MASK;
+       pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
+                       invalid_mask, &invalid_mask, flags, &flags);
+       dump_stack();
+
+       return flags;
+}
+
 /*
  * To avoid unnecessary overhead, we pass through large allocation requests
  * directly to the page allocator. We use __GFP_COMP, because we will need to
@@ -1342,12 +829,15 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
        void *ret = NULL;
        struct page *page;
 
+       if (unlikely(flags & GFP_SLAB_BUG_MASK))
+               flags = kmalloc_fix_flags(flags);
+
        flags |= __GFP_COMP;
        page = alloc_pages(flags, order);
        if (likely(page)) {
                ret = page_address(page);
-               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
-                                   1 << order);
+               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
+                                   PAGE_SIZE << order);
        }
        ret = kasan_kmalloc_large(ret, size, flags);
        /* As ret might get tagged, call kmemleak hook after KASAN. */
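
A hedged sketch (not part of the patch) of a caller that would hit the new kmalloc_fix_flags() path: with SLUB, a request above KMALLOC_MAX_CACHE_SIZE is routed through kmalloc_order() above, which now strips the invalid __GFP_HIGHMEM bit and prints the "Fix your code!" warning instead of handing the bogus mask to the page allocator.

#include <linux/gfp.h>
#include <linux/slab.h>

static void *bad_gfp_demo(void)
{
	/* __GFP_HIGHMEM is part of GFP_SLAB_BUG_MASK and gets masked off */
	return kmalloc(KMALLOC_MAX_CACHE_SIZE + 1,
		       GFP_KERNEL | __GFP_HIGHMEM);
}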
@@ -1444,12 +934,12 @@ static void print_slabinfo_header(struct seq_file *m)
 void *slab_start(struct seq_file *m, loff_t *pos)
 {
        mutex_lock(&slab_mutex);
-       return seq_list_start(&slab_root_caches, *pos);
+       return seq_list_start(&slab_caches, *pos);
 }
 
 void *slab_next(struct seq_file *m, void *p, loff_t *pos)
 {
-       return seq_list_next(p, &slab_root_caches, pos);
+       return seq_list_next(p, &slab_caches, pos);
 }
 
 void slab_stop(struct seq_file *m, void *p)
@@ -1457,27 +947,6 @@ void slab_stop(struct seq_file *m, void *p)
        mutex_unlock(&slab_mutex);
 }
 
-static void
-memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
-{
-       struct kmem_cache *c;
-       struct slabinfo sinfo;
-
-       if (!is_root_cache(s))
-               return;
-
-       for_each_memcg_cache(c, s) {
-               memset(&sinfo, 0, sizeof(sinfo));
-               get_slabinfo(c, &sinfo);
-
-               info->active_slabs += sinfo.active_slabs;
-               info->num_slabs += sinfo.num_slabs;
-               info->shared_avail += sinfo.shared_avail;
-               info->active_objs += sinfo.active_objs;
-               info->num_objs += sinfo.num_objs;
-       }
-}
-
 static void cache_show(struct kmem_cache *s, struct seq_file *m)
 {
        struct slabinfo sinfo;
@@ -1485,10 +954,8 @@ static void cache_show(struct kmem_cache *s, struct seq_file *m)
        memset(&sinfo, 0, sizeof(sinfo));
        get_slabinfo(s, &sinfo);
 
-       memcg_accumulate_slabinfo(s, &sinfo);
-
        seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
-                  cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
+                  s->name, sinfo.active_objs, sinfo.num_objs, s->size,
                   sinfo.objects_per_slab, (1 << sinfo.cache_order));
 
        seq_printf(m, " : tunables %4u %4u %4u",
@@ -1501,9 +968,9 @@ static void cache_show(struct kmem_cache *s, struct seq_file *m)
 
 static int slab_show(struct seq_file *m, void *p)
 {
-       struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node);
+       struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
 
-       if (p == slab_root_caches.next)
+       if (p == slab_caches.next)
                print_slabinfo_header(m);
        cache_show(s, m);
        return 0;
@@ -1530,13 +997,13 @@ void dump_unreclaimable_slab(void)
        pr_info("Name                      Used          Total\n");
 
        list_for_each_entry_safe(s, s2, &slab_caches, list) {
-               if (!is_root_cache(s) || (s->flags & SLAB_RECLAIM_ACCOUNT))
+               if (s->flags & SLAB_RECLAIM_ACCOUNT)
                        continue;
 
                get_slabinfo(s, &sinfo);
 
                if (sinfo.num_objs > 0)
-                       pr_info("%-17s %10luKB %10luKB\n", cache_name(s),
+                       pr_info("%-17s %10luKB %10luKB\n", s->name,
                                (sinfo.active_objs * s->size) / 1024,
                                (sinfo.num_objs * s->size) / 1024);
        }
@@ -1544,35 +1011,12 @@ void dump_unreclaimable_slab(void)
 }
 
 #if defined(CONFIG_MEMCG_KMEM)
-void *memcg_slab_start(struct seq_file *m, loff_t *pos)
-{
-       struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
-
-       mutex_lock(&slab_mutex);
-       return seq_list_start(&memcg->kmem_caches, *pos);
-}
-
-void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
-{
-       struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
-
-       return seq_list_next(p, &memcg->kmem_caches, pos);
-}
-
-void memcg_slab_stop(struct seq_file *m, void *p)
-{
-       mutex_unlock(&slab_mutex);
-}
-
 int memcg_slab_show(struct seq_file *m, void *p)
 {
-       struct kmem_cache *s = list_entry(p, struct kmem_cache,
-                                         memcg_params.kmem_caches_node);
-       struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
-
-       if (p == memcg->kmem_caches.next)
-               print_slabinfo_header(m);
-       cache_show(s, m);
+       /*
+        * Deprecated.
+        * Please, take a look at tools/cgroup/slabinfo.py .
+        */
        return 0;
 }
 #endif
@@ -1618,73 +1062,15 @@ static int __init slab_proc_init(void)
 }
 module_init(slab_proc_init);
 
-#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MEMCG_KMEM)
-/*
- * Display information about kmem caches that have child memcg caches.
- */
-static int memcg_slabinfo_show(struct seq_file *m, void *unused)
-{
-       struct kmem_cache *s, *c;
-       struct slabinfo sinfo;
-
-       mutex_lock(&slab_mutex);
-       seq_puts(m, "# <name> <css_id[:dead|deact]> <active_objs> <num_objs>");
-       seq_puts(m, " <active_slabs> <num_slabs>\n");
-       list_for_each_entry(s, &slab_root_caches, root_caches_node) {
-               /*
-                * Skip kmem caches that don't have any memcg children.
-                */
-               if (list_empty(&s->memcg_params.children))
-                       continue;
-
-               memset(&sinfo, 0, sizeof(sinfo));
-               get_slabinfo(s, &sinfo);
-               seq_printf(m, "%-17s root       %6lu %6lu %6lu %6lu\n",
-                          cache_name(s), sinfo.active_objs, sinfo.num_objs,
-                          sinfo.active_slabs, sinfo.num_slabs);
-
-               for_each_memcg_cache(c, s) {
-                       struct cgroup_subsys_state *css;
-                       char *status = "";
-
-                       css = &c->memcg_params.memcg->css;
-                       if (!(css->flags & CSS_ONLINE))
-                               status = ":dead";
-                       else if (c->flags & SLAB_DEACTIVATED)
-                               status = ":deact";
-
-                       memset(&sinfo, 0, sizeof(sinfo));
-                       get_slabinfo(c, &sinfo);
-                       seq_printf(m, "%-17s %4d%-6s %6lu %6lu %6lu %6lu\n",
-                                  cache_name(c), css->id, status,
-                                  sinfo.active_objs, sinfo.num_objs,
-                                  sinfo.active_slabs, sinfo.num_slabs);
-               }
-       }
-       mutex_unlock(&slab_mutex);
-       return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(memcg_slabinfo);
-
-static int __init memcg_slabinfo_init(void)
-{
-       debugfs_create_file("memcg_slabinfo", S_IFREG | S_IRUGO,
-                           NULL, NULL, &memcg_slabinfo_fops);
-       return 0;
-}
-
-late_initcall(memcg_slabinfo_init);
-#endif /* CONFIG_DEBUG_FS && CONFIG_MEMCG_KMEM */
 #endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
 
 static __always_inline void *__do_krealloc(const void *p, size_t new_size,
                                           gfp_t flags)
 {
        void *ret;
-       size_t ks = 0;
+       size_t ks;
 
-       if (p)
-               ks = ksize(p);
+       ks = ksize(p);
 
        if (ks >= new_size) {
                p = kasan_krealloc((void *)p, new_size, flags);
@@ -1729,28 +1115,27 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 EXPORT_SYMBOL(krealloc);
 
 /**
- * kzfree - like kfree but zero memory
+ * kfree_sensitive - Clear sensitive information in memory before freeing
  * @p: object to free memory of
  *
  * The memory of the object @p points to is zeroed before freed.
- * If @p is %NULL, kzfree() does nothing.
+ * If @p is %NULL, kfree_sensitive() does nothing.
  *
  * Note: this function zeroes the whole allocated buffer which can be a good
  * deal bigger than the requested buffer size passed to kmalloc(). So be
  * careful when using this function in performance sensitive code.
  */
-void kzfree(const void *p)
+void kfree_sensitive(const void *p)
 {
        size_t ks;
        void *mem = (void *)p;
 
-       if (unlikely(ZERO_OR_NULL_PTR(mem)))
-               return;
        ks = ksize(mem);
-       memzero_explicit(mem, ks);
+       if (ks)
+               memzero_explicit(mem, ks);
        kfree(mem);
 }
-EXPORT_SYMBOL(kzfree);
+EXPORT_SYMBOL(kfree_sensitive);
 
 /**
  * ksize - get the actual amount of memory allocated for a given object
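
Following the hunk above, a usage sketch for the renamed helper (function and buffer names hypothetical): kfree_sensitive() is the drop-in replacement for kzfree() wherever key material or other secrets must not linger in freed memory.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static int use_and_wipe_key(const u8 *key, size_t len)
{
	u8 *copy = kmemdup(key, len, GFP_KERNEL);

	if (!copy)
		return -ENOMEM;

	/* ... derive session state from the key ... */

	kfree_sensitive(copy);	/* whole buffer is zeroed, then freed */
	return 0;
}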
@@ -1770,8 +1155,6 @@ size_t ksize(const void *objp)
 {
        size_t size;
 
-       if (WARN_ON_ONCE(!objp))
-               return 0;
        /*
         * We need to check that the pointed to object is valid, and only then
         * unpoison the shadow memory below. We use __kasan_check_read(), to
@@ -1785,7 +1168,7 @@ size_t ksize(const void *objp)
         * We want to perform the check before __ksize(), to avoid potentially
         * crashing in __ksize() due to accessing invalid metadata.
         */
-       if (unlikely(objp == ZERO_SIZE_PTR) || !__kasan_check_read(objp, 1))
+       if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1))
                return 0;
 
        size = __ksize(objp);
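
To summarize the contract the callers above now rely on (a hedged sketch, not from the patch): ksize() returns 0 for NULL and for ZERO_SIZE_PTR without warning, which is what lets __do_krealloc() and kfree_sensitive() drop their explicit NULL/zero-pointer checks.

#include <linux/bug.h>
#include <linux/slab.h>

static void ksize_contract_demo(void)
{
	void *p = kmalloc(100, GFP_KERNEL);

	WARN_ON(p && ksize(p) < 100);		/* at least the requested size */
	WARN_ON(ksize(NULL) != 0);		/* NULL: 0, no warning anymore */
	WARN_ON(ksize(ZERO_SIZE_PTR) != 0);	/* zero-size alloc: also 0 */

	kfree(p);
}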