index a178c73..68123b2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1533,7 +1533,8 @@ static __always_inline void kfree_hook(void *x)
        kasan_kfree_large(x);
 }
 
-static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
+static __always_inline bool slab_free_hook(struct kmem_cache *s,
+                                               void *x, bool init)
 {
        kmemleak_free_recursive(x, s->flags);
 
@@ -1559,8 +1560,25 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
                __kcsan_check_access(x, s->object_size,
                                     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
 
-       /* KASAN might put x into memory quarantine, delaying its reuse */
-       return kasan_slab_free(s, x);
+       /*
+        * As memory initialization might be integrated into KASAN,
+        * kasan_slab_free and initialization memsets must be
+        * kept together to avoid discrepancies in behavior.
+        *
+        * The initialization memsets clear the object and the metadata,
+        * but don't touch the SLAB redzone.
+        */
+       if (init) {
+               int rsize;
+
+               if (!kasan_has_integrated_init())
+                       memset(kasan_reset_tag(x), 0, s->object_size);
+               rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
+               memset((char *)kasan_reset_tag(x) + s->inuse, 0,
+                      s->size - s->inuse - rsize);
+       }
+       /* KASAN might put x into memory quarantine, delaying its reuse. */
+       return kasan_slab_free(s, x, init);
 }
 
 static inline bool slab_free_freelist_hook(struct kmem_cache *s,
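
The two memsets above split the wipe in two: the first clears the
object_size payload bytes, the second clears the free pointer and debug
metadata starting at offset inuse, stopping rsize bytes short of the end of
the slot so the red zone survives for later corruption checks. A
self-contained userspace sketch of the same arithmetic follows; the struct
and all sizes are made up for illustration and only mirror the kmem_cache
fields used above.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the kmem_cache fields used by the hook. */
struct fake_cache {
	size_t object_size;	/* payload bytes handed to callers */
	size_t inuse;		/* offset where metadata begins */
	size_t size;		/* full per-object footprint */
	size_t red_left_pad;	/* red zone width when red zoning is on */
	int red_zone;		/* stand-in for the SLAB_RED_ZONE flag */
};

/* Mirrors the init_on_free wipe: payload + metadata, red zone kept. */
static void wipe_on_free(const struct fake_cache *s, unsigned char *x)
{
	size_t rsize = s->red_zone ? s->red_left_pad : 0;

	memset(x, 0, s->object_size);
	memset(x + s->inuse, 0, s->size - s->inuse - rsize);
}

int main(void)
{
	struct fake_cache s = {
		.object_size = 64, .inuse = 72, .size = 96,
		.red_left_pad = 8, .red_zone = 1,
	};
	unsigned char obj[96];

	memset(obj, 0xaa, sizeof(obj));
	wipe_on_free(&s, obj);
	/* [0,64) and [72,88) are now zero; [88,96) still holds 0xaa. */
	printf("payload=%#x meta=%#x redzone=%#x\n",
	       obj[0], obj[80], obj[90]);
	return 0;
}
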
@@ -1570,10 +1588,9 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
        void *object;
        void *next = *head;
        void *old_tail = *tail ? *tail : *head;
-       int rsize;
 
        if (is_kfence_address(next)) {
-               slab_free_hook(s, next);
+               slab_free_hook(s, next, false);
                return true;
        }
 
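KFENCE objects live in a dedicated pool, bypass the regular slab layout,
and handle their own (re)initialization, which is why the hook is told not
to wipe them (init == false). For context, is_kfence_address() is
essentially a range check against that pool; a simplified sketch (the real
definition lives in include/linux/kfence.h and may differ in detail):

/* True iff addr falls inside the KFENCE object pool. */
static __always_inline bool is_kfence_address(const void *addr)
{
	return unlikely((unsigned long)((char *)addr - __kfence_pool) <
			KFENCE_POOL_SIZE && __kfence_pool);
}
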
@@ -1585,20 +1602,8 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
                object = next;
                next = get_freepointer(s, object);
 
-               if (slab_want_init_on_free(s)) {
-                       /*
-                        * Clear the object and the metadata, but don't touch
-                        * the redzone.
-                        */
-                       memset(kasan_reset_tag(object), 0, s->object_size);
-                       rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
-                                                          : 0;
-                       memset((char *)kasan_reset_tag(object) + s->inuse, 0,
-                              s->size - s->inuse - rsize);
-
-               }
                /* If object's reuse doesn't have to be delayed */
-               if (!slab_free_hook(s, object)) {
+               if (!slab_free_hook(s, object, slab_want_init_on_free(s))) {
                        /* Move object to the new freelist */
                        set_freepointer(s, object, *head);
                        *head = object;
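
With the open-coded memset loop gone, the init decision is delegated to
slab_free_hook() by passing slab_want_init_on_free(s) for every object.
The repeated call is cheap: the helper (defined in mm/slab.h, not part of
this diff) reduces to a static branch plus cache-flag checks, roughly as
sketched below; treat the exact guards as approximate.

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
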
@@ -2823,6 +2828,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
        struct page *page;
        unsigned long tid;
        struct obj_cgroup *objcg = NULL;
+       bool init = false;
 
        s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
        if (!s)
@@ -2900,12 +2906,10 @@ redo:
        }
 
        maybe_wipe_obj_freeptr(s, object);
-
-       if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
-               memset(kasan_reset_tag(object), 0, s->object_size);
+       init = slab_want_init_on_alloc(gfpflags, s);
 
 out:
-       slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
+       slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init);
 
        return object;
 }
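
The zeroing that used to sit here moves into slab_post_alloc_hook(), whose
mm/slab.h counterpart gains a matching bool init parameter in the same
series. A sketch of that counterpart (the real definition lives in
mm/slab.h and may differ in detail): the memset has to come after
kasan_slab_alloc() because the returned pointer may carry a new tag, and it
is skipped when KASAN's HW_TAGS mode already zeroes memory while retagging,
which is what kasan_has_integrated_init() reports.

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init)
{
	size_t i;

	flags &= gfp_allowed_mask;

	for (i = 0; i < size; i++) {
		/* KASAN may retag; do it first, then init the new address. */
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, s->object_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}
}
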
@@ -3237,7 +3241,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
        }
 
        if (is_kfence_address(object)) {
-               slab_free_hook(df->s, object);
+               slab_free_hook(df->s, object, false);
                __kfence_free(object);
                p[size] = NULL; /* mark object processed */
                return size;
@@ -3357,20 +3361,16 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
        c->tid = next_tid(c->tid);
        local_irq_enable();
 
-       /* Clear memory outside IRQ disabled fastpath loop */
-       if (unlikely(slab_want_init_on_alloc(flags, s))) {
-               int j;
-
-               for (j = 0; j < i; j++)
-                       memset(kasan_reset_tag(p[j]), 0, s->object_size);
-       }
-
-       /* memcg and kmem_cache debug support */
-       slab_post_alloc_hook(s, objcg, flags, size, p);
+       /*
+        * memcg and kmem_cache debug support, and memory initialization,
+        * all done outside of the IRQ-disabled fastpath loop.
+        */
+       slab_post_alloc_hook(s, objcg, flags, size, p,
+                               slab_want_init_on_alloc(flags, s));
        return i;
 error:
        local_irq_enable();
-       slab_post_alloc_hook(s, objcg, flags, i, p);
+       slab_post_alloc_hook(s, objcg, flags, i, p, false);
        __kmem_cache_free_bulk(s, i, p);
        return 0;
 }
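
Hoisting the initialization into slab_post_alloc_hook() keeps the memsets
off the IRQ-disabled fastpath while leaving the caller-visible contract
unchanged; on the error path init is false because the partially allocated
objects are freed again immediately. A hypothetical caller, unaffected by
this patch ("cache" is assumed to have been created elsewhere):

static int use_bulk(struct kmem_cache *cache)
{
	void *objs[16];
	int n;

	/* With init_on_alloc or __GFP_ZERO, objects come back zeroed. */
	n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
	if (!n)
		return -ENOMEM;
	/* ... use objs[0..n-1] ... */
	kmem_cache_free_bulk(cache, n, objs);
	return 0;
}
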
@@ -3580,7 +3580,7 @@ static void early_kmem_cache_node_alloc(int node)
        init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
        init_tracking(kmem_cache_node, n);
 #endif
-       n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL);
+       n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
        page->freelist = get_freepointer(kmem_cache_node, n);
        page->inuse = 1;
        page->frozen = 0;
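
During early boot the kmem_cache_node object is initialized by hand just
above, so no KASAN-driven zeroing is wanted and init is passed as false.
For reference, after this series the KASAN hooks used throughout this file
take the new flag; a sketch of their shape (see include/linux/kasan.h for
the authoritative declarations, which vary by KASAN mode):

/* Allocation-side hook: may return a (re)tagged object pointer. */
void *kasan_slab_alloc(struct kmem_cache *s, void *object,
		       gfp_t flags, bool init);

/* Free-side hook: true means reuse must be delayed (quarantine). */
bool kasan_slab_free(struct kmem_cache *s, void *object, bool init);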