diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 13128fa..2f9fdfd 100644
 #define KFENCE_WARN_ON(cond)                                                   \
        ({                                                                     \
                const bool __cond = WARN_ON(cond);                             \
-               if (unlikely(__cond))                                          \
+               if (unlikely(__cond)) {                                        \
                        WRITE_ONCE(kfence_enabled, false);                     \
+                       disabled_by_warn = true;                               \
+               }                                                              \
                __cond;                                                        \
        })
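/*
 * Illustrative sketch (not from this patch): KFENCE_WARN_ON() is a statement
 * expression that evaluates to the tested condition, so a caller can report
 * an internal bug, permanently disable KFENCE, and branch in one step.
 * example_check() and its argument are hypothetical.
 */
static void example_check(const void *meta)
{
	if (KFENCE_WARN_ON(!meta))
		return;	/* WARN fired: KFENCE is now off and barred from re-enable */
	/* ... proceed knowing the invariant holds ... */
}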
 
 /* === Data ================================================================= */
 
 static bool kfence_enabled __read_mostly;
+static bool disabled_by_warn __read_mostly;
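/* Note (annotation, not part of the patch): set only by KFENCE_WARN_ON()
 * above; once true, the sample_interval handler below refuses to re-enable
 * KFENCE, since an internal consistency check has already failed. */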
 
 unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
 EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
@@ -55,6 +58,7 @@ EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
 #endif
 #define MODULE_PARAM_PREFIX "kfence."
 
+static int kfence_enable_late(void);
 static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
 {
        unsigned long num;
@@ -65,10 +69,11 @@ static int param_set_sample_interval(const char *val, const struct kernel_param
 
        if (!num) /* Using 0 to indicate KFENCE is disabled. */
                WRITE_ONCE(kfence_enabled, false);
-       else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
-               return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */
 
        *((unsigned long *)kp->arg) = num;
+
+       if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
+               return disabled_by_warn ? -EINVAL : kfence_enable_late();
        return 0;
 }
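/*
 * For context, the matching getter elsewhere in this file (approximate
 * sketch, unchanged by this patch) reports 0 while KFENCE is disabled,
 * mirroring the setter's "0 means off" convention:
 */
static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}
/*
 * Net effect of the change above: after boot, writing a non-zero value to
 * /sys/module/kfence/parameters/sample_interval brings KFENCE up late; the
 * write is refused with -EINVAL only when disabled_by_warn is set, though a
 * late pool allocation can still fail with its own error codes.
 */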
 
@@ -90,8 +95,12 @@ module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_inte
 static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
 module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
 
+/* If true, use a deferrable timer. */
+static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
+module_param_named(deferrable, kfence_deferrable, bool, 0444);
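/* Note (annotation, not part of the patch): perms 0444 make the parameter
 * visible but not writable at runtime, so deferrability is chosen once at
 * boot, e.g. with kfence.deferrable=1 on the kernel command line. */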
+
 /* The pool of pages used for guard pages and objects. */
-char *__kfence_pool __ro_after_init;
+char *__kfence_pool __read_mostly;
 EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
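/* Note (annotation, not part of the patch): __ro_after_init had to be
 * dropped because kfence_init_late() may assign __kfence_pool long after
 * boot-time read-only protection has been applied; __read_mostly keeps the
 * read-side cache placement without the write restriction. */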
 
 /*
@@ -532,17 +541,19 @@ static void rcu_guarded_free(struct rcu_head *h)
        kfence_guarded_free((void *)meta->addr, meta, false);
 }
 
-static bool __init kfence_init_pool(void)
+/*
+ * Initialization of the KFENCE pool after its allocation.
+ * Returns 0 on success; otherwise returns the address up to
+ * which partial initialization succeeded.
+ */
+static unsigned long kfence_init_pool(void)
 {
        unsigned long addr = (unsigned long)__kfence_pool;
        struct page *pages;
        int i;
 
-       if (!__kfence_pool)
-               return false;
-
        if (!arch_kfence_init_pool())
-               goto err;
+               return addr;
 
        pages = virt_to_page(addr);
 
@@ -560,7 +571,7 @@ static bool __init kfence_init_pool(void)
 
                /* Verify we do not have a compound head page. */
                if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
-                       goto err;
+                       return addr;
 
                __SetPageSlab(&pages[i]);
        }
@@ -573,7 +584,7 @@ static bool __init kfence_init_pool(void)
         */
        for (i = 0; i < 2; i++) {
                if (unlikely(!kfence_protect(addr)))
-                       goto err;
+                       return addr;
 
                addr += PAGE_SIZE;
        }
@@ -590,7 +601,7 @@ static bool __init kfence_init_pool(void)
 
                /* Protect the right redzone. */
                if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
-                       goto err;
+                       return addr;
 
                addr += 2 * PAGE_SIZE;
        }
@@ -603,9 +614,21 @@ static bool __init kfence_init_pool(void)
         */
        kmemleak_free(__kfence_pool);
 
-       return true;
+       return 0;
+}
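/*
 * Sketch of the shared caller pattern (condensed from the two callers that
 * follow): a zero return means the whole pool is live; a non-zero return is
 * the first address that was not set up, i.e. the start of the unprotected
 * tail that is safe to hand back to the allocator.
 *
 *	addr = kfence_init_pool();
 *	if (!addr)
 *		return true;
 *	free [addr, __kfence_pool + KFENCE_POOL_SIZE) and clear __kfence_pool
 */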
+
+static bool __init kfence_init_pool_early(void)
+{
+       unsigned long addr;
+
+       if (!__kfence_pool)
+               return false;
+
+       addr = kfence_init_pool();
+
+       if (!addr)
+               return true;
 
-err:
        /*
         * Only release unprotected pages, and do not try to go back and change
         * page attributes due to risk of failing to do so as well. If changing
@@ -618,6 +641,26 @@ err:
        return false;
 }
 
+static bool kfence_init_pool_late(void)
+{
+       unsigned long addr, free_size;
+
+       addr = kfence_init_pool();
+
+       if (!addr)
+               return true;
+
+       /*
+        * Same as the error path in kfence_init_pool_early(): only release
+        * the unprotected tail of the pool.
+        */
+       free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
+#ifdef CONFIG_CONTIG_ALLOC
+       free_contig_range(page_to_pfn(virt_to_page(addr)), free_size / PAGE_SIZE);
+#else
+       free_pages_exact((void *)addr, free_size);
+#endif
+       __kfence_pool = NULL;
+       return false;
+}
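/*
 * Worked example of the tail computation (made-up failure point, assuming
 * 4 KiB pages and a 512-page pool): if initialization fails on the 10th
 * page, addr == __kfence_pool + 10 * PAGE_SIZE, so
 *	free_size = KFENCE_POOL_SIZE - 10 * PAGE_SIZE
 * and 502 pages go back. free_contig_range() takes a pfn plus a page count,
 * while free_pages_exact() takes a virtual address plus a byte size, hence
 * the two #ifdef branches above.
 */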
+
 /* === DebugFS Interface ==================================================== */
 
 static int stats_show(struct seq_file *seq, void *v)
@@ -701,6 +744,8 @@ late_initcall(kfence_debugfs_init);
 
 /* === Allocation Gate Timer ================================================ */
 
+static struct delayed_work kfence_timer;
+
 #ifdef CONFIG_KFENCE_STATIC_KEYS
 /* Wait queue to wake up allocation-gate timer task. */
 static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
@@ -723,7 +768,6 @@ static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
  * avoids IPIs, at the cost of not immediately capturing allocations if the
  * instructions remain cached.
  */
-static struct delayed_work kfence_timer;
 static void toggle_allocation_gate(struct work_struct *work)
 {
        if (!READ_ONCE(kfence_enabled))
@@ -751,7 +795,6 @@ static void toggle_allocation_gate(struct work_struct *work)
        queue_delayed_work(system_unbound_wq, &kfence_timer,
                           msecs_to_jiffies(kfence_sample_interval));
 }
-static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);
 
 /* === Public interface ===================================================== */
 
@@ -766,25 +809,77 @@ void __init kfence_alloc_pool(void)
                pr_err("failed to allocate pool\n");
 }
 
+static void kfence_init_enable(void)
+{
+       if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
+               static_branch_enable(&kfence_allocation_key);
+
+       if (kfence_deferrable)
+               INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
+       else
+               INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);
+
+       WRITE_ONCE(kfence_enabled, true);
+       queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+
+       pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
+               CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
+               (void *)(__kfence_pool + KFENCE_POOL_SIZE));
+}
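/*
 * The timer moves from a static DECLARE_DELAYED_WORK() initializer to
 * runtime INIT_DELAYED_WORK()/INIT_DEFERRABLE_WORK() because deferrability
 * is only known once the boot parameter has been parsed. A deferrable work
 * item does not wake an idle CPU; it runs when the CPU wakes for another
 * reason. Minimal self-re-arming sketch (hypothetical module, glue such as
 * module_init() omitted, not from this patch):
 */
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct delayed_work demo_work;

static void demo_fn(struct work_struct *work)
{
	/* On an idle system the deferrable variant may fire late by design. */
	queue_delayed_work(system_unbound_wq, &demo_work, msecs_to_jiffies(100));
}

static int __init demo_init(void)
{
	INIT_DEFERRABLE_WORK(&demo_work, demo_fn);	/* vs. INIT_DELAYED_WORK() */
	queue_delayed_work(system_unbound_wq, &demo_work, 0);
	return 0;
}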
+
 void __init kfence_init(void)
 {
+       stack_hash_seed = (u32)random_get_entropy();
+
        /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
        if (!kfence_sample_interval)
                return;
 
-       stack_hash_seed = (u32)random_get_entropy();
-       if (!kfence_init_pool()) {
+       if (!kfence_init_pool_early()) {
                pr_err("%s failed\n", __func__);
                return;
        }
 
-       if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
-               static_branch_enable(&kfence_allocation_key);
+       kfence_init_enable();
+}
+
+static int kfence_init_late(void)
+{
+       const unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE;
+#ifdef CONFIG_CONTIG_ALLOC
+       struct page *pages;
+
+       pages = alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NULL);
+       if (!pages)
+               return -ENOMEM;
+       __kfence_pool = page_to_virt(pages);
+#else
+       if (nr_pages > MAX_ORDER_NR_PAGES) {
+               pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
+               return -EINVAL;
+       }
+       __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
+       if (!__kfence_pool)
+               return -ENOMEM;
+#endif
+
+       if (!kfence_init_pool_late()) {
+               pr_err("%s failed\n", __func__);
+               return -EBUSY;
+       }
+
+       kfence_init_enable();
+       return 0;
+}
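/*
 * Worked size check for the buddy fallback (assumed defaults; both limits
 * are config- and arch-dependent): with CONFIG_KFENCE_NUM_OBJECTS=255 and
 * 4 KiB pages,
 *	KFENCE_POOL_SIZE   = (255 + 1) * 2 * PAGE_SIZE = 512 pages = 2 MiB
 *	MAX_ORDER_NR_PAGES = 1 << (MAX_ORDER - 1)      = 1024 pages = 4 MiB
 * so the default pool fits within alloc_pages_exact(); only much larger
 * object counts require CONFIG_CONTIG_ALLOC.
 */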
+
+static int kfence_enable_late(void)
+{
+       if (!__kfence_pool)
+               return kfence_init_late();
+
        WRITE_ONCE(kfence_enabled, true);
        queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
-       pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
-               CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
-               (void *)(__kfence_pool + KFENCE_POOL_SIZE));
+       return 0;
 }
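/*
 * Condensed view of the resulting control flow for a write to
 * sample_interval (a summary of the code above, no new logic):
 *	num == 0            -> kfence_enabled = false (always permitted)
 *	num > 0, at boot    -> value stored; kfence_init() enables as before
 *	num > 0, after boot -> -EINVAL if disabled_by_warn, else
 *	                       kfence_init_late() when no pool exists yet, or
 *	                       a plain re-enable plus timer kick when it does.
 */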
 
 void kfence_shutdown_cache(struct kmem_cache *s)