When we obtain a new slab page from the node partial list or the page allocator,
we assign it to kmem_cache_cpu, perform some checks, and if they fail, we undo
the assignment.
In order to allow the checks to be performed without irqs disabled, restructure
the code so that the checks are done first, and the kmem_cache_cpu.page
assignment is made only after they pass.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
lockdep_assert_irqs_disabled();
freelist = get_partial(s, gfpflags, node, &page);
lockdep_assert_irqs_disabled();
freelist = get_partial(s, gfpflags, node, &page);
- if (freelist) {
- c->page = page;
local_irq_restore(flags);
put_cpu_ptr(s->cpu_slab);
local_irq_restore(flags);
put_cpu_ptr(s->cpu_slab);
- if (c->page)
- flush_slab(s, c);
-
/*
* No other reference to the page yet so we can
* muck around with it freely without cmpxchg
/*
* No other reference to the page yet so we can
* muck around with it freely without cmpxchg
page->freelist = NULL;
stat(s, ALLOC_SLAB);
page->freelist = NULL;
stat(s, ALLOC_SLAB);
check_new_page:
if (kmem_cache_debug(s)) {
if (!alloc_debug_processing(s, page, freelist, addr)) {
/* Slab failed checks. Next slab needed */
check_new_page:
if (kmem_cache_debug(s)) {
if (!alloc_debug_processing(s, page, freelist, addr)) {
/* Slab failed checks. Next slab needed */
local_irq_restore(flags);
goto new_slab;
} else {
local_irq_restore(flags);
goto new_slab;
} else {
+ if (unlikely(c->page))
+ flush_slab(s, c);
+ c->page = page;
+
goto load_freelist;
return_single:
goto load_freelist;
return_single:
+ if (unlikely(c->page))
+ flush_slab(s, c);
+ c->page = page;
+
deactivate_slab(s, page, get_freepointer(s, freelist), c);
local_irq_restore(flags);
return freelist;
deactivate_slab(s, page, get_freepointer(s, freelist), c);
local_irq_restore(flags);
return freelist;