mm: kfence: apply kmemleak_ignore_phys on early allocated pool
[linux-2.6-microblaze.git] / mm / kfence / core.c
index 4e7cd4c..6aff49f 100644 (file)
@@ -360,6 +360,9 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
        unsigned long flags;
        struct slab *slab;
        void *addr;
+       const bool random_right_allocate = prandom_u32_max(2);
+       const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
+                                 !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS);
 
        /* Try to obtain a free object. */
        raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
@@ -404,7 +407,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
         * is that the out-of-bounds accesses detected are deterministic for
         * such allocations.
         */
-       if (prandom_u32_max(2)) {
+       if (random_right_allocate) {
                /* Allocate on the "right" side, re-calculate address. */
                meta->addr += PAGE_SIZE - size;
                meta->addr = ALIGN_DOWN(meta->addr, cache->align);
@@ -444,7 +447,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
        if (cache->ctor)
                cache->ctor(addr);
 
-       if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
+       if (random_fault)
                kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
 
        atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
@@ -600,14 +603,6 @@ static unsigned long kfence_init_pool(void)
                addr += 2 * PAGE_SIZE;
        }
 
-       /*
-        * The pool is live and will never be deallocated from this point on.
-        * Remove the pool object from the kmemleak object tree, as it would
-        * otherwise overlap with allocations returned by kfence_alloc(), which
-        * are registered with kmemleak through the slab post-alloc hook.
-        */
-       kmemleak_free(__kfence_pool);
-
        return 0;
 }
 
@@ -620,8 +615,16 @@ static bool __init kfence_init_pool_early(void)
 
        addr = kfence_init_pool();
 
-       if (!addr)
+       if (!addr) {
+               /*
+                * The pool is live and will never be deallocated from this
+                * point on. Ignore the pool object in the kmemleak phys object
+                * tree, as it would otherwise overlap with allocations returned
+                * by kfence_alloc(), which are registered with kmemleak through
+                * the slab post-alloc hook.
+                */
+               kmemleak_ignore_phys(__pa(__kfence_pool));
                return true;
+       }
 
        /*
         * Only release unprotected pages, and do not try to go back and change