                memset(objp, 0, cachep->object_size);
        kmemleak_free_recursive(objp, cachep->flags);
        objp = cache_free_debugcheck(cachep, objp, caller);
-       memcg_slab_free_hook(cachep, virt_to_head_page(objp), objp);
+       memcg_slab_free_hook(cachep, &objp, 1);
 
        /*
         * Skip calling cache_free_alien() when the platform is not numa.
 
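The mm/slab.c hunk above adapts the SLAB single-object free path to the hook's new array-plus-count signature by wrapping the pointer in a one-element array. A minimal userspace sketch of that adapter pattern, with illustrative stand-in names (free_hook is not a kernel symbol):

#include <stdio.h>

/* Illustrative stand-in for the array-based hook. */
static void free_hook(void **p, int objects)
{
        int i;

        for (i = 0; i < objects; i++)
                if (p[i])
                        printf("uncharge %p\n", p[i]);
}

int main(void)
{
        int a, b, c;
        void *obj = &a;
        void *batch[] = { &b, NULL, &c };       /* bulk callers may pass NULLs */

        free_hook(&obj, 1);     /* single-object caller, as in the hunk above */
        free_hook(batch, 3);    /* bulk caller, as in kmem_cache_free_bulk() */
        return 0;
}

One signature now serves both the single-object and the bulk free paths, which is the point of the interface change.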
        obj_cgroup_put(objcg);
 }
 
-static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
-                                       void *p)
+static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
+                                       void **p, int objects)
 {
+       struct kmem_cache *s;
        struct obj_cgroup *objcg;
+       struct page *page;
        unsigned int off;
+       int i;
 
        if (!memcg_kmem_enabled())
                return;
 
-       if (!page_has_obj_cgroups(page))
-               return;
+       for (i = 0; i < objects; i++) {
+               if (unlikely(!p[i]))
+                       continue;
 
-       off = obj_to_index(s, page, p);
-       objcg = page_obj_cgroups(page)[off];
-       page_obj_cgroups(page)[off] = NULL;
+               page = virt_to_head_page(p[i]);
+               if (!page_has_obj_cgroups(page))
+                       continue;
 
-       if (!objcg)
-               return;
+               if (!s_orig)
+                       s = page->slab_cache;
+               else
+                       s = s_orig;
 
-       obj_cgroup_uncharge(objcg, obj_full_size(s));
-       mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
-                       -obj_full_size(s));
+               off = obj_to_index(s, page, p[i]);
+               objcg = page_obj_cgroups(page)[off];
+               if (!objcg)
+                       continue;
 
-       obj_cgroup_put(objcg);
+               page_obj_cgroups(page)[off] = NULL;
+               obj_cgroup_uncharge(objcg, obj_full_size(s));
+               mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
+                               -obj_full_size(s));
+               obj_cgroup_put(objcg);
+       }
 }
 
 #else /* CONFIG_MEMCG_KMEM */
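Two details in the rewritten loop deserve a note. First, bulk callers may pass NULL entries in the array, hence the unlikely(!p[i]) skip. Second, kfree_bulk() invokes kmem_cache_free_bulk() with a NULL cache, so when s_orig is NULL the cache is recovered from the slab page itself. A hedged sketch of that resolution step, with stand-in types (struct slab_page approximates the relevant part of struct page):

#include <stdio.h>

struct kmem_cache { const char *name; };                /* illustrative */
struct slab_page { struct kmem_cache *slab_cache; };    /* stand-in for struct page */

static struct kmem_cache *resolve_cache(struct kmem_cache *s_orig,
                                        struct slab_page *page)
{
        /* kfree_bulk() reaches here with s_orig == NULL */
        return s_orig ? s_orig : page->slab_cache;
}

int main(void)
{
        struct kmem_cache cache = { "kmalloc-64" };
        struct slab_page page = { &cache };

        printf("%s\n", resolve_cache(NULL, &page)->name);      /* kfree_bulk path */
        printf("%s\n", resolve_cache(&cache, &page)->name);    /* kmem_cache_free_bulk path */
        return 0;
}

The { and } context lines just below are the tail of the empty memcg_slab_post_alloc_hook() stub that precedes the free hook in the !CONFIG_MEMCG_KMEM branch.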
 {
 }
 
-static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
-                                       void *p)
+static inline void memcg_slab_free_hook(struct kmem_cache *s,
+                                       void **p, int objects)
 {
 }
 #endif /* CONFIG_MEMCG_KMEM */
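Because the !CONFIG_MEMCG_KMEM stub above is an empty static inline, the updated call sites in mm/slub.c below stay unconditional and the compiler eliminates the call when memcg kmem accounting is not built in. A generic sketch of the idiom, with illustrative names (CONFIG_FEATURE and the functions are stand-ins):

#ifdef CONFIG_FEATURE
static inline void feature_free_hook(void **p, int objects)
{
        /* real accounting work when the feature is enabled */
}
#else
static inline void feature_free_hook(void **p, int objects)
{
        /* empty stub: calls compile away to nothing */
}
#endif

static void free_path(void **p, int n)
{
        feature_free_hook(p, n);        /* no #ifdef needed at the call site */
}

int main(void)
{
        void *p = 0;

        free_path(&p, 1);
        return 0;
}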
 
        struct kmem_cache_cpu *c;
        unsigned long tid;
 
-       memcg_slab_free_hook(s, page, head);
+       memcg_slab_free_hook(s, &head, 1);
 redo:
        /*
         * Determine the currently cpus per cpu slab.
        if (WARN_ON(!size))
                return;
 
+       memcg_slab_free_hook(s, p, size);
        do {
                struct detached_freelist df;
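This final kmem_cache_free_bulk() hunk uncharges the entire batch in one hook call before the detached-freelist loop begins, rather than once per object inside it. A runnable sketch of that shape, assuming illustrative helpers (hook and free_one stand in for the memcg hook and the freeing proper):

#include <stdio.h>

static void hook(void **p, int n)       /* stand-in for the batched hook */
{
        printf("uncharge %d objects\n", n);
}

static void free_one(void *obj)         /* stand-in for the freeing proper */
{
        printf("free %p\n", obj);
}

static void bulk_free(void **p, int n)
{
        if (!n)                         /* mirrors the WARN_ON(!size) bail-out */
                return;

        hook(p, n);                     /* one call covers the whole batch */
        while (n--)
                free_one(p[n]);         /* then free object by object */
}

int main(void)
{
        int a, b;
        void *objs[] = { &a, &b };

        bulk_free(objs, 2);
        return 0;
}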