Commit c4f20f14 authored by Li Zhe, committed by Andrew Morton

page_ext: introduce boot parameter 'early_page_ext'

In commit 2f1ee091 ("Revert "mm: use early_pfn_to_nid in
page_ext_init""), we call page_ext_init() after page_alloc_init_late() to
avoid some panic problem.  It seems that we cannot track early page
allocations in current kernel even if page structure has been initialized
early.

This patch introduces a new boot parameter 'early_page_ext' to resolve
this problem.  If we pass it to the kernel, page_ext_init() will be moved
up and the feature 'deferred initialization of struct pages' will be
disabled to initialize the page allocator early and prevent the panic
problem above.  It can help us to catch early page allocations.  This is
useful especially when we find that the free memory value is not the same
right after different kernel booting.

[akpm@linux-foundation.org: fix section issue by removing __meminitdata]
Link: https://lkml.kernel.org/r/20220825102714.669-1-lizhe.67@bytedance.com
Signed-off-by: Li Zhe <lizhe.67@bytedance.com>
Suggested-by: Michal Hocko <mhocko@suse.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jason A. Donenfeld <Jason@zx2c4.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark-PK Tsai <mark-pk.tsai@mediatek.com>
Cc: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 1a6baaa0
......@@ -1471,6 +1471,14 @@
Permit 'security.evm' to be updated regardless of
current integrity status.
early_page_ext [KNL] Enforces page_ext initialization to earlier
stages so as to cover more early boot allocations.
Please note that as side effect some optimizations
might be disabled to achieve that (e.g. parallelized
memory initialization is disabled) so the boot process
might take longer, especially on systems with a lot of
memory. Available with CONFIG_PAGE_EXTENSION=y.
failslab=
fail_usercopy=
fail_page_alloc=
......
......@@ -36,9 +36,15 @@ struct page_ext {
unsigned long flags;
};
extern bool early_page_ext;
extern unsigned long page_ext_size;
extern void pgdat_page_ext_init(struct pglist_data *pgdat);
/*
 * early_page_ext_enabled - true when the "early_page_ext" boot parameter
 * was given, i.e. page_ext_init() should run early from mm_init() instead
 * of from kernel_init_freeable() after page_alloc_init_late().
 */
static inline bool early_page_ext_enabled(void)
{
return early_page_ext;
}
#ifdef CONFIG_SPARSEMEM
static inline void page_ext_init_flatmem(void)
{
......@@ -68,6 +74,11 @@ static inline struct page_ext *page_ext_next(struct page_ext *curr)
#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;
/* !CONFIG_PAGE_EXTENSION stub: early page_ext init can never be enabled. */
static inline bool early_page_ext_enabled(void)
{
return false;
}
/* No-op when page_ext is not configured. */
static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
}
......
......@@ -849,6 +849,9 @@ static void __init mm_init(void)
pgtable_init();
debug_objects_mem_init();
vmalloc_init();
/* Should be run after vmap initialization */
if (early_page_ext_enabled())
page_ext_init();
/* Should be run before the first non-init thread is created */
init_espfix_bsp();
/* Should be run after espfix64 is set up. */
......@@ -1618,6 +1621,7 @@ static noinline void __init kernel_init_freeable(void)
padata_init();
page_alloc_init_late();
/* Initialize page ext after all struct pages are initialized. */
if (!early_page_ext_enabled())
page_ext_init();
do_basic_setup();
......
......@@ -482,6 +482,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
static unsigned long prev_end_pfn, nr_initialised;
if (early_page_ext_enabled())
return false;
/*
* prev_end_pfn static that contains the end of previous zone
* No need to protect because called very early in boot before smp_init.
......
......@@ -91,6 +91,14 @@ unsigned long page_ext_size = sizeof(struct page_ext);
static unsigned long total_usage;
static struct page_ext *lookup_page_ext(const struct page *page);
/* Set at boot when "early_page_ext" is present on the kernel command line. */
bool early_page_ext;
/*
 * Parse the "early_page_ext" boot parameter. The parameter takes no
 * value; its mere presence enables early page_ext initialization.
 * Returns 0 to indicate the parameter was consumed successfully.
 */
static int __init setup_early_page_ext(char *str)
{
early_page_ext = true;
return 0;
}
early_param("early_page_ext", setup_early_page_ext);
static bool __init invoke_need_callbacks(void)
{
int i;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.