page_ext: introduce boot parameter 'early_page_ext'
authorLi Zhe <lizhe.67@bytedance.com>
Thu, 25 Aug 2022 10:27:14 +0000 (18:27 +0800)
committerAndrew Morton <akpm@linux-foundation.org>
Mon, 12 Sep 2022 03:26:02 +0000 (20:26 -0700)
In commit 2f1ee0913ce5 ("Revert "mm: use early_pfn_to_nid in
page_ext_init""), we call page_ext_init() after page_alloc_init_late() to
avoid a panic problem.  As a result, we cannot track early page
allocations in the current kernel even if the page structures have been
initialized early.

This patch introduces a new boot parameter 'early_page_ext' to resolve
this problem.  If we pass it to the kernel, page_ext_init() will be moved
up and the feature 'deferred initialization of struct pages' will be
disabled to initialize the page allocator early and prevent the panic
problem above.  It can help us to catch early page allocations.  This is
especially useful when we find that the amount of free memory differs
right after boot between otherwise identical kernels.

[akpm@linux-foundation.org: fix section issue by removing __meminitdata]
Link: https://lkml.kernel.org/r/20220825102714.669-1-lizhe.67@bytedance.com
Signed-off-by: Li Zhe <lizhe.67@bytedance.com>
Suggested-by: Michal Hocko <mhocko@suse.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jason A. Donenfeld <Jason@zx2c4.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark-PK Tsai <mark-pk.tsai@mediatek.com>
Cc: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Documentation/admin-guide/kernel-parameters.txt
include/linux/page_ext.h
init/main.c
mm/page_alloc.c
mm/page_ext.c

index 426fa89..3b95f65 100644 (file)
                        Permit 'security.evm' to be updated regardless of
                        current integrity status.
 
+       early_page_ext [KNL] Enforces page_ext initialization to earlier
+                       stages so that it covers more early boot allocations.
+                       Please note that as side effect some optimizations
+                       might be disabled to achieve that (e.g. parallelized
+                       memory initialization is disabled) so the boot process
+                       might take longer, especially on systems with a lot of
+                       memory. Available with CONFIG_PAGE_EXTENSION=y.
+
        failslab=
        fail_usercopy=
        fail_page_alloc=
index ed27198..22be458 100644 (file)
@@ -36,9 +36,15 @@ struct page_ext {
        unsigned long flags;
 };
 
+extern bool early_page_ext;
 extern unsigned long page_ext_size;
 extern void pgdat_page_ext_init(struct pglist_data *pgdat);
 
+static inline bool early_page_ext_enabled(void)
+{
+       return early_page_ext;
+}
+
 #ifdef CONFIG_SPARSEMEM
 static inline void page_ext_init_flatmem(void)
 {
@@ -68,6 +74,11 @@ static inline struct page_ext *page_ext_next(struct page_ext *curr)
 #else /* !CONFIG_PAGE_EXTENSION */
 struct page_ext;
 
+static inline bool early_page_ext_enabled(void)
+{
+       return false;
+}
+
 static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
 {
 }
index 1fe7942..2a475d4 100644 (file)
@@ -849,6 +849,9 @@ static void __init mm_init(void)
        pgtable_init();
        debug_objects_mem_init();
        vmalloc_init();
+       /* Should be run after vmap initialization */
+       if (early_page_ext_enabled())
+               page_ext_init();
        /* Should be run before the first non-init thread is created */
        init_espfix_bsp();
        /* Should be run after espfix64 is set up. */
@@ -1618,7 +1621,8 @@ static noinline void __init kernel_init_freeable(void)
        padata_init();
        page_alloc_init_late();
        /* Initialize page ext after all struct pages are initialized. */
-       page_ext_init();
+       if (!early_page_ext_enabled())
+               page_ext_init();
 
        do_basic_setup();
 
index 48c65bf..1d42781 100644 (file)
@@ -482,6 +482,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 {
        static unsigned long prev_end_pfn, nr_initialised;
 
+       if (early_page_ext_enabled())
+               return false;
        /*
         * prev_end_pfn static that contains the end of previous zone
         * No need to protect because called very early in boot before smp_init.
index b236bdd..affe802 100644 (file)
@@ -91,6 +91,14 @@ unsigned long page_ext_size = sizeof(struct page_ext);
 static unsigned long total_usage;
 static struct page_ext *lookup_page_ext(const struct page *page);
 
+bool early_page_ext;
+static int __init setup_early_page_ext(char *str)
+{
+       early_page_ext = true;
+       return 0;
+}
+early_param("early_page_ext", setup_early_page_ext);
+
 static bool __init invoke_need_callbacks(void)
 {
        int i;