mempool: use new mempool KASAN hooks
author		Andrey Konovalov <andreyknvl@google.com>
		Tue, 19 Dec 2023 22:28:58 +0000 (23:28 +0100)
committer	Andrew Morton <akpm@linux-foundation.org>
		Fri, 29 Dec 2023 19:58:39 +0000 (11:58 -0800)
Update the mempool code to use the new mempool KASAN hooks.

Rely on the return value of kasan_mempool_poison_object() and
kasan_mempool_poison_pages() to prevent double-free and invalid-free bugs.
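
For context, a paraphrased sketch of the hook prototypes this patch relies
on (introduced earlier in this series; the authoritative declarations live
in include/linux/kasan.h):

	/*
	 * Poisoning hooks: return false when KASAN detects a double-free
	 * or an invalid-free, in which case the element must not be kept
	 * in the pool.
	 */
	bool kasan_mempool_poison_pages(struct page *page, unsigned int order);
	bool kasan_mempool_poison_object(void *ptr);

	/*
	 * Unpoisoning hooks: make a stored element accessible again when
	 * it is handed back out on allocation.
	 */
	void kasan_mempool_unpoison_pages(struct page *page, unsigned int order);
	void kasan_mempool_unpoison_object(void *ptr, size_t size);

kasan_poison_element() and kasan_unpoison_element() in the diff below are
the mempool-side users of these hooks.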

Link: https://lkml.kernel.org/r/d36fc4a6865bdbd297cadb46b67641d436849f4c.1703024586.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Lobakin <alobakin@pm.me>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Breno Leitao <leitao@debian.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/mempool.c

index 1fd3947..103dc47 100644
@@ -112,32 +112,34 @@ static inline void poison_element(mempool_t *pool, void *element)
 }
 #endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
 
-static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
+static __always_inline bool kasan_poison_element(mempool_t *pool, void *element)
 {
        if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-               kasan_mempool_poison_object(element);
+               return kasan_mempool_poison_object(element);
        else if (pool->alloc == mempool_alloc_pages)
-               kasan_poison_pages(element, (unsigned long)pool->pool_data,
-                                  false);
+               return kasan_mempool_poison_pages(element,
+                                               (unsigned long)pool->pool_data);
+       return true;
 }
 
 static void kasan_unpoison_element(mempool_t *pool, void *element)
 {
        if (pool->alloc == mempool_kmalloc)
-               kasan_unpoison_range(element, (size_t)pool->pool_data);
+               kasan_mempool_unpoison_object(element, (size_t)pool->pool_data);
        else if (pool->alloc == mempool_alloc_slab)
-               kasan_unpoison_range(element, kmem_cache_size(pool->pool_data));
+               kasan_mempool_unpoison_object(element,
+                                             kmem_cache_size(pool->pool_data));
        else if (pool->alloc == mempool_alloc_pages)
-               kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
-                                    false);
+               kasan_mempool_unpoison_pages(element,
+                                            (unsigned long)pool->pool_data);
 }
 
 static __always_inline void add_element(mempool_t *pool, void *element)
 {
        BUG_ON(pool->curr_nr >= pool->min_nr);
        poison_element(pool, element);
-       kasan_poison_element(pool, element);
-       pool->elements[pool->curr_nr++] = element;
+       if (kasan_poison_element(pool, element))
+               pool->elements[pool->curr_nr++] = element;
 }
 
 static void *remove_element(mempool_t *pool)