Commit 384a746b authored by David Hildenbrand, committed by Andrew Morton

Revert "mm: init_mlocked_on_free_v3"

There was insufficient review and no agreement that this is the right
approach.

There are serious flaws in the implementation: processes using mlock() do
not even work with a simple fork() [1], and we get reliable crashes when
rebooting.
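
For context, a minimal userspace sketch of the fork() scenario reported in
[1] (an illustration of the failure class, not the exact reproducer; names
and sizes are arbitrary):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		/* mlock()ed anonymous memory, e.g. holding key material. */
		char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (buf == MAP_FAILED || mlock(buf, 4096))
			return 1;
		strcpy(buf, "secret");

		/* After fork(), parent and child share the mlocked folio.
		 * Per [1], zapping the child's PTEs on exit could zero the
		 * folio even though the parent still maps it. */
		pid_t pid = fork();
		if (pid == 0)
			_exit(0);
		waitpid(pid, NULL, 0);

		printf("parent sees: \"%s\"\n", buf);	/* expected: "secret" */
		return 0;
	}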

Further, simply because we might be unmapping a single PTE of a large
mlocked folio, we shouldn't zero out the whole folio.

... especially because the code can also *corrupt* unrelated memory:

	kernel_init_pages(page, folio_nr_pages(folio));

could end up writing outside of the actual folio if we are working with a
tail page.
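
To see the overrun: page can be any page of the folio, including a tail
page, while folio_nr_pages(folio) is the full folio size, so initializing
that many pages starting at a tail page runs past the folio's end. A
standalone arithmetic model (illustrative constants, not kernel API):

	#include <assert.h>
	#include <stddef.h>

	#define PAGES_PER_FOLIO 512	/* e.g. a 2 MiB THP of 4 KiB pages */

	int main(void)
	{
		size_t tail_idx = 100;	/* zapped PTE maps this tail page */

		/* kernel_init_pages(page, folio_nr_pages(folio)) touches
		 * PAGES_PER_FOLIO pages starting at `page`, but only
		 * (PAGES_PER_FOLIO - tail_idx) pages remain in the folio. */
		size_t end = tail_idx + PAGES_PER_FOLIO;
		size_t overrun = end - PAGES_PER_FOLIO;

		/* The write overruns by exactly the tail page's index. */
		assert(overrun == tail_idx);
		return 0;
	}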

Let's revert it.  Once there is agreement that this is the right approach,
the issues have been fixed, and reasonable review and proper testing have
happened, we can consider it again.

[1] https://lkml.kernel.org/r/4da9da2f-73e4-45fd-b62f-a8a513314057@redhat.com

Link: https://lkml.kernel.org/r/20240605091710.38961-1-david@redhat.com
Fixes: ba42b524 ("mm: init_mlocked_on_free_v3")
Signed-off-by: David Hildenbrand <david@redhat.com>
Reported-by: David Wang <00107082@163.com>
Closes: https://lore.kernel.org/lkml/20240528151340.4282-1-00107082@163.com/
Reported-by: Lance Yang <ioworker0@gmail.com>
Closes: https://lkml.kernel.org/r/20240601140917.43562-1-ioworker0@gmail.com
Acked-by: Lance Yang <ioworker0@gmail.com>
Cc: York Jasper Niebuhr <yjnworkstation@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 8bb592c2
Documentation/admin-guide/kernel-parameters.txt
@@ -2192,12 +2192,6 @@
 			Format: 0 | 1
 			Default set by CONFIG_INIT_ON_FREE_DEFAULT_ON.
 
-	init_mlocked_on_free=	[MM] Fill freed userspace memory with zeroes if
-			it was mlock'ed and not explicitly munlock'ed
-			afterwards.
-			Format: 0 | 1
-			Default set by CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON
-
 	init_pkru=	[X86] Specify the default memory protection keys rights
 			register contents for all processes.  0x55555554 by
 			default (disallow access to all but pkey 0).  Can
include/linux/mm.h
@@ -3776,14 +3776,7 @@ DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
 static inline bool want_init_on_free(void)
 {
 	return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
 				   &init_on_free);
 }
 
-DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON, init_mlocked_on_free);
-static inline bool want_init_mlocked_on_free(void)
-{
-	return static_branch_maybe(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON,
-				   &init_mlocked_on_free);
-}
-
 extern bool _debug_pagealloc_enabled_early;
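For context, the helper removed above mirrored want_init_on_free(): a
static-key-backed toggle whose branch is patched at runtime, so the check
is nearly free on hot paths. A rough userspace model of such a boot-time
toggle (a plain bool stands in for the static key, which in the kernel
uses code patching rather than a memory load):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* Stand-in for DEFINE_STATIC_KEY_MAYBE(...). */
	static bool init_mlocked_on_free_enabled;

	/* Stand-in for the early_param() hook: parse "0"/"1" from the
	 * boot command line once during early startup. */
	static void early_init_mlocked_on_free(const char *arg)
	{
		init_mlocked_on_free_enabled = !strcmp(arg, "1");
	}

	/* Stand-in for want_init_mlocked_on_free(), which really used
	 * static_branch_maybe() rather than reading a bool. */
	static bool want_init_mlocked_on_free(void)
	{
		return init_mlocked_on_free_enabled;
	}

	int main(void)
	{
		early_init_mlocked_on_free("1");
		printf("%d\n", want_init_mlocked_on_free());	/* prints 1 */
		return 0;
	}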
mm/internal.h
@@ -588,7 +588,6 @@ extern void __putback_isolated_page(struct page *page, unsigned int order,
 extern void memblock_free_pages(struct page *page, unsigned long pfn,
 					unsigned int order);
 extern void __free_pages_core(struct page *page, unsigned int order);
-extern void kernel_init_pages(struct page *page, int numpages);
 
 /*
  * This will have no effect, other than possibly generating a warning, if the
mm/memory.c
@@ -1507,12 +1507,6 @@ static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
 		if (unlikely(folio_mapcount(folio) < 0))
 			print_bad_pte(vma, addr, ptent, page);
 	}
 
-	if (want_init_mlocked_on_free() && folio_test_mlocked(folio) &&
-	    !delay_rmap && folio_test_anon(folio)) {
-		kernel_init_pages(page, folio_nr_pages(folio));
-	}
-
 	if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) {
 		*force_flush = true;
 		*force_break = true;
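An aside on the hunk above: within zap_present_folio_ptes(), nr is the
number of PTEs actually being zapped starting at page, so any retry of
this feature would presumably need to clamp the init to those pages rather
than to the whole folio, along the lines of:

	/* Hypothetical sketch, not from this commit: init only the nr
	 * pages actually being unmapped, instead of passing
	 * folio_nr_pages(folio) starting from a possible tail page. */
	kernel_init_pages(page, nr);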
mm/mm_init.c
@@ -2523,9 +2523,6 @@ EXPORT_SYMBOL(init_on_alloc);
 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
 EXPORT_SYMBOL(init_on_free);
 
-DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON, init_mlocked_on_free);
-EXPORT_SYMBOL(init_mlocked_on_free);
-
 static bool _init_on_alloc_enabled_early __read_mostly
 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
 static int __init early_init_on_alloc(char *buf)
@@ -2543,14 +2540,6 @@ static int __init early_init_on_free(char *buf)
 }
 early_param("init_on_free", early_init_on_free);
 
-static bool _init_mlocked_on_free_enabled_early __read_mostly
-				= IS_ENABLED(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON);
-static int __init early_init_mlocked_on_free(char *buf)
-{
-	return kstrtobool(buf, &_init_mlocked_on_free_enabled_early);
-}
-early_param("init_mlocked_on_free", early_init_mlocked_on_free);
-
 DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
 
 /*
@@ -2578,21 +2567,12 @@ static void __init mem_debugging_and_hardening_init(void)
 	}
 #endif
 
-	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early ||
-	    _init_mlocked_on_free_enabled_early) &&
+	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
 	    page_poisoning_requested) {
 		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
-			"will take precedence over init_on_alloc, init_on_free "
-			"and init_mlocked_on_free\n");
+			"will take precedence over init_on_alloc and init_on_free\n");
 		_init_on_alloc_enabled_early = false;
 		_init_on_free_enabled_early = false;
-		_init_mlocked_on_free_enabled_early = false;
-	}
-
-	if (_init_mlocked_on_free_enabled_early && _init_on_free_enabled_early) {
-		pr_info("mem auto-init: init_on_free is on, "
-			"will take precedence over init_mlocked_on_free\n");
-		_init_mlocked_on_free_enabled_early = false;
 	}
 
 	if (_init_on_alloc_enabled_early) {
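The two removed blocks above encoded the feature's precedence rules: page
poisoning wins over every auto-init mode, and init_on_free (which already
zeroes everything on free) wins over the narrower init_mlocked_on_free. A
small standalone model of that resolution logic (illustrative struct and
field names, simplified from the kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	struct meminit {
		bool poison;		/* page poisoning requested */
		bool on_alloc;		/* init_on_alloc=1 */
		bool on_free;		/* init_on_free=1 */
		bool mlocked_on_free;	/* init_mlocked_on_free=1 (reverted) */
	};

	static void resolve(struct meminit *m)
	{
		/* Page poisoning takes precedence over all auto-init modes. */
		if (m->poison)
			m->on_alloc = m->on_free = m->mlocked_on_free = false;

		/* init_on_free already zeroes everything on free, so it wins
		 * over the narrower init_mlocked_on_free. */
		if (m->on_free)
			m->mlocked_on_free = false;
	}

	int main(void)
	{
		struct meminit m = { .on_free = true, .mlocked_on_free = true };
		resolve(&m);
		printf("mlocked_on_free effective: %d\n", m.mlocked_on_free); /* 0 */
		return 0;
	}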
@@ -2609,17 +2589,9 @@ static void __init mem_debugging_and_hardening_init(void)
 		static_branch_disable(&init_on_free);
 	}
 
-	if (_init_mlocked_on_free_enabled_early) {
-		want_check_pages = true;
-		static_branch_enable(&init_mlocked_on_free);
-	} else {
-		static_branch_disable(&init_mlocked_on_free);
-	}
-
-	if (IS_ENABLED(CONFIG_KMSAN) && (_init_on_alloc_enabled_early ||
-	    _init_on_free_enabled_early || _init_mlocked_on_free_enabled_early))
-		pr_info("mem auto-init: please make sure init_on_alloc, init_on_free and "
-			"init_mlocked_on_free are disabled when running KMSAN\n");
+	if (IS_ENABLED(CONFIG_KMSAN) &&
+	    (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
+		pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	if (debug_pagealloc_enabled()) {
@@ -2658,10 +2630,9 @@ static void __init report_meminit(void)
 	else
 		stack = "off";
 
-	pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s, mlocked free:%s\n",
+	pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
 		stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
-		want_init_on_free() ? "on" : "off",
-		want_init_mlocked_on_free() ? "on" : "off");
+		want_init_on_free() ? "on" : "off");
 	if (want_init_on_free())
 		pr_info("mem auto-init: clearing system memory may take some time...\n");
 }
mm/page_alloc.c
@@ -1016,7 +1016,7 @@ static inline bool should_skip_kasan_poison(struct page *page)
 	return page_kasan_tag(page) == KASAN_TAG_KERNEL;
 }
 
-void kernel_init_pages(struct page *page, int numpages)
+static void kernel_init_pages(struct page *page, int numpages)
 {
 	int i;
security/Kconfig.hardening
@@ -255,21 +255,6 @@ config INIT_ON_FREE_DEFAULT_ON
 	  touching "cold" memory areas. Most cases see 3-5% impact. Some
 	  synthetic workloads have measured as high as 8%.
 
-config INIT_MLOCKED_ON_FREE_DEFAULT_ON
-	bool "Enable mlocked memory zeroing on free"
-	depends on !KMSAN
-	help
-	  This config has the effect of setting "init_mlocked_on_free=1"
-	  on the kernel command line. If it is enabled, all mlocked process
-	  memory is zeroed when freed. This restriction to mlocked memory
-	  improves performance over "init_on_free" but can still be used to
-	  protect confidential data like key material from content exposures
-	  to other processes, as well as live forensics and cold boot attacks.
-	  Any non-mlocked memory is not cleared before it is reassigned. This
-	  configuration can be overwritten by setting "init_mlocked_on_free=0"
-	  on the command line. The "init_on_free" boot option takes
-	  precedence over "init_mlocked_on_free".
-
 config CC_HAS_ZERO_CALL_USED_REGS
 	def_bool $(cc-option,-fzero-call-used-regs=used-gpr)
 	# https://github.com/ClangBuiltLinux/linux/issues/1766