Merge branch 'slub/cleanups' into slab/next
index 719509e..2de3c99 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1369,7 +1369,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 
        inc_slabs_node(s, page_to_nid(page), page->objects);
        page->slab = s;
-       page->flags |= 1 << PG_slab;
+       __SetPageSlab(page);
 
        start = page_address(page);
 
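Replacing the open-coded page->flags |= 1 << PG_slab; with __SetPageSlab() switches to the kernel's standard non-atomic page-flag setter. The double-underscore variants are generated by the __SETPAGEFLAG() macro in include/linux/page-flags.h and reduce to __set_bit(), which is legal here because new_slab() still owns the freshly allocated page exclusively, so no atomic read-modify-write is needed. Roughly what the generated helper looks like (a sketch reconstructed from the macro pattern, not a verbatim quote):

    /* Approximate expansion of __SETPAGEFLAG(Slab, slab): a plain,
     * non-atomic bit set, valid only while no one else can see the
     * page, e.g. right after allocation in new_slab(). */
    static inline void __SetPageSlab(struct page *page)
    {
            __set_bit(PG_slab, &page->flags);
    }

This also keeps all PG_slab manipulation behind one interface instead of hand-rolled shifts.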
@@ -1513,17 +1513,20 @@ static inline void *acquire_slab(struct kmem_cache *s,
        freelist = page->freelist;
        counters = page->counters;
        new.counters = counters;
-       if (mode)
+       if (mode) {
                new.inuse = page->objects;
+               new.freelist = NULL;
+       } else {
+               new.freelist = freelist;
+       }
 
        VM_BUG_ON(new.frozen);
        new.frozen = 1;
 
        if (!__cmpxchg_double_slab(s, page,
                        freelist, counters,
-                       NULL, new.counters,
+                       new.freelist, new.counters,
                        "acquire_slab"))
-
                return NULL;
 
        remove_partial(n, page);
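The new mode flag decides where the frozen slab's objects end up. With mode set (the slab is about to become the cpu slab) the whole freelist is taken, as before: new.freelist = NULL and new.inuse = page->objects. With mode clear (the slab is headed for the per-cpu partial list) the freelist now stays attached to the page. Either way, feeding new.freelist into __cmpxchg_double_slab() alongside new.counters keeps the (freelist, counters) pair updated in a single atomic step. A toy model of the decision, with simplified types (illustration only, not the kernel's page/counters union):

    struct slab_view {
            void *freelist;                 /* first free object, or NULL */
            unsigned int inuse;             /* objects handed out         */
            unsigned int objects;           /* total objects in the slab  */
            unsigned int frozen;            /* owned by a cpu slab path   */
    };

    static void freeze(struct slab_view *new, const struct slab_view *old,
                       int mode)
    {
            *new = *old;
            if (mode) {
                    /* Becoming the cpu slab: the caller takes every
                     * object, so the page keeps no freelist. */
                    new->inuse = old->objects;
                    new->freelist = NULL;
            }
            /* mode == 0: the freelist stays with the page so it can be
             * parked on the per-cpu partial list. */
            new->frozen = 1;
    }

Previously the cmpxchg unconditionally installed NULL and callers had to patch page->freelist back by hand for the partial case; pushing the choice into acquire_slab() closes that window (see the next hunk).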
@@ -1565,7 +1568,6 @@ static void *get_partial_node(struct kmem_cache *s,
                        object = t;
                        available =  page->objects - page->inuse;
                } else {
-                       page->freelist = t;
                        available = put_cpu_partial(s, page, 0);
                        stat(s, CPU_PARTIAL_NODE);
                }
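Dropping page->freelist = t; is the other half of the acquire_slab() change above. In the put_cpu_partial() case the freelist never leaves the page anymore, so this plain store is not only redundant but dangerous: concurrent frees update page->freelist via cmpxchg, and an unsynchronized write-back here could clobber their result. Abridged shape of the loop after the change (paraphrased; stat() calls and the available-objects bookkeeping omitted):

    list_for_each_entry_safe(page, page2, &n->partial, lru) {
            void *t = acquire_slab(s, n, page, object == NULL);

            if (!t)
                    break;

            if (!object) {
                    c->page = page;         /* first hit: new cpu slab */
                    object = t;
            } else {
                    /* acquire_slab() left the freelist on the page;
                     * nothing needs (or is safe) to be written back. */
                    put_cpu_partial(s, page, 0);
            }
    }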
@@ -1580,7 +1582,7 @@ static void *get_partial_node(struct kmem_cache *s,
 /*
  * Get a page from somewhere. Search in increasing NUMA distances.
  */
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
                struct kmem_cache_cpu *c)
 {
 #ifdef CONFIG_NUMA
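The return type fix brings get_any_partial() in line with what it actually returns: get_partial_node() hands back a pointer to the first allocated object, while the slab page itself travels through c->page, so the old struct page * declaration was stale. The whole partial-list lookup chain now agrees (prototypes paraphrased for illustration):

    /* All three levels return an object pointer, or NULL on failure;
     * the chosen page is published via c->page. */
    static void *get_partial_node(struct kmem_cache *s,
                    struct kmem_cache_node *n, struct kmem_cache_cpu *c);
    static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
                    struct kmem_cache_cpu *c);
    static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
                    struct kmem_cache_cpu *c);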
@@ -2777,7 +2779,7 @@ static unsigned long calculate_alignment(unsigned long flags,
 }
 
 static void
-init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
+init_kmem_cache_node(struct kmem_cache_node *n)
 {
        n->nr_partial = 0;
        spin_lock_init(&n->list_lock);
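init_kmem_cache_node() never used its struct kmem_cache argument, so it is dropped here and at the three call sites updated in the hunks that follow. For reference, the complete helper after the change (reconstructed; the hunk shows only its opening lines):

    static void
    init_kmem_cache_node(struct kmem_cache_node *n)
    {
            n->nr_partial = 0;
            spin_lock_init(&n->list_lock);
            INIT_LIST_HEAD(&n->partial);
    #ifdef CONFIG_SLUB_DEBUG
            atomic_long_set(&n->nr_slabs, 0);
            atomic_long_set(&n->total_objects, 0);
            INIT_LIST_HEAD(&n->full);
    #endif
    }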
@@ -2847,7 +2849,7 @@ static void early_kmem_cache_node_alloc(int node)
        init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
        init_tracking(kmem_cache_node, n);
 #endif
-       init_kmem_cache_node(n, kmem_cache_node);
+       init_kmem_cache_node(n);
        inc_slabs_node(kmem_cache_node, node, page->objects);
 
        add_partial(n, page, DEACTIVATE_TO_HEAD);
@@ -2887,7 +2889,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
                }
 
                s->node[node] = n;
-               init_kmem_cache_node(n, s);
+               init_kmem_cache_node(n);
        }
        return 1;
 }
@@ -3636,7 +3638,7 @@ static int slab_mem_going_online_callback(void *arg)
                        ret = -ENOMEM;
                        goto out;
                }
-               init_kmem_cache_node(n, s);
+               init_kmem_cache_node(n);
                s->node[nid] = n;
        }
 out:
@@ -3979,9 +3981,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                        }
                        return s;
                }
-               kfree(n);
                kfree(s);
        }
+       kfree(n);
 err:
        up_write(&slub_lock);
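Moving kfree(n) below the closing brace fixes a small leak in the error handling: n is the kstrdup()'d copy of the cache name, and it was previously freed only inside the if (s) block, so a failed kmalloc() of s fell through to err with the name still allocated. Now every failure path releases it, while the success path has already returned s. Abridged shape of the tail of kmem_cache_create() after the change (paraphrased):

    n = kstrdup(name, GFP_KERNEL);          /* private copy of the name */
    if (!n)
            goto err;

    s = kmalloc(kmem_size, GFP_KERNEL);
    if (s) {
            if (kmem_cache_open(s, n, size, align, flags, ctor)) {
                    /* success: cache registered; caller owns s */
                    return s;
            }
            kfree(s);                       /* kmem_cache_open() failed */
    }
    kfree(n);                               /* reached whether kmalloc()
                                             * or kmem_cache_open() failed,
                                             * so the name no longer leaks */
    err:
            up_write(&slub_lock);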