mm, slub: stop disabling irqs around get_partial()
author	Vlastimil Babka <vbabka@suse.cz>
Tue, 11 May 2021 15:45:26 +0000 (17:45 +0200)
committer	Vlastimil Babka <vbabka@suse.cz>
Fri, 3 Sep 2021 23:12:21 +0000 (01:12 +0200)
The function get_partial() does not need to have irqs disabled as a whole. It's
sufficient to convert spin_lock operations to their irq saving/restoring
versions.

As a result, it's now possible to reach the page allocator from the slab
allocator without disabling and re-enabling interrupts on the way.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
mm/slub.c

index b578804..8433e50 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2010,11 +2010,12 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
  * Try to allocate a partial slab from a specific node.
  */
 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
-                             struct page **ret_page, gfp_t flags)
+                             struct page **ret_page, gfp_t gfpflags)
 {
        struct page *page, *page2;
        void *object = NULL;
        unsigned int available = 0;
+       unsigned long flags;
        int objects;
 
        /*
@@ -2026,11 +2027,11 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
        if (!n || !n->nr_partial)
                return NULL;
 
-       spin_lock(&n->list_lock);
+       spin_lock_irqsave(&n->list_lock, flags);
        list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
                void *t;
 
-               if (!pfmemalloc_match(page, flags))
+               if (!pfmemalloc_match(page, gfpflags))
                        continue;
 
                t = acquire_slab(s, n, page, object == NULL, &objects);
@@ -2051,7 +2052,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
                        break;
 
        }
-       spin_unlock(&n->list_lock);
+       spin_unlock_irqrestore(&n->list_lock, flags);
        return object;
 }
 
@@ -2779,8 +2780,10 @@ new_slab:
                        local_irq_restore(flags);
                        goto reread_page;
                }
-               if (unlikely(!slub_percpu_partial(c)))
+               if (unlikely(!slub_percpu_partial(c))) {
+                       local_irq_restore(flags);
                        goto new_objects; /* stolen by an IRQ handler */
+               }
 
                page = c->page = slub_percpu_partial(c);
                slub_set_percpu_partial(c, page);
@@ -2789,18 +2792,9 @@ new_slab:
                goto redo;
        }
 
-       local_irq_save(flags);
-       if (unlikely(c->page)) {
-               local_irq_restore(flags);
-               goto reread_page;
-       }
-
 new_objects:
 
-       lockdep_assert_irqs_disabled();
-
        freelist = get_partial(s, gfpflags, node, &page);
-       local_irq_restore(flags);
        if (freelist)
                goto check_new_page;