Commit 1e15d374 authored by Alexander Potapenko's avatar Alexander Potapenko Committed by Andrew Morton

Revert "x86: kmsan: sync metadata pages on page fault"

This reverts commit 3f1e2c7a.

As noticed by Qun-Wei Lin, arch_sync_kernel_mappings() in
arch/x86/mm/fault.c is only used with CONFIG_X86_32, whereas KMSAN is only
supported on x86_64, where this code is not compiled.

The patch in question dates back to downstream KMSAN branch based on
v5.8-rc5, it sneaked into upstream unnoticed in v6.1.

Link: https://lkml.kernel.org/r/20230111101806.3236991-1-glider@google.com
Signed-off-by: Alexander Potapenko <glider@google.com>
Reported-by: Qun-Wei Lin <qun-wei.lin@mediatek.com>
Link: https://github.com/google/kmsan/issues/91
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Marco Elver <elver@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 82b24936
......@@ -260,7 +260,7 @@ static noinline int vmalloc_fault(unsigned long address)
}
NOKPROBE_SYMBOL(vmalloc_fault);
static void __arch_sync_kernel_mappings(unsigned long start, unsigned long end)
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
unsigned long addr;
......@@ -284,27 +284,6 @@ static void __arch_sync_kernel_mappings(unsigned long start, unsigned long end)
}
}
/*
 * Sync kernel page-table mappings for [start, end) and, under
 * CONFIG_KMSAN, also sync the two KMSAN metadata mappings that shadow
 * the vmalloc area.
 *
 * NOTE(review): this is the hunk being REVERTED by this commit — per
 * the commit message, arch_sync_kernel_mappings() in this file is only
 * built for CONFIG_X86_32, while KMSAN is x86_64-only, so the KMSAN
 * branch below was dead code.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
/* Always sync the primary kernel mapping for the requested range. */
__arch_sync_kernel_mappings(start, end);
#ifdef CONFIG_KMSAN
/*
 * KMSAN maintains two additional metadata page mappings for the
 * [VMALLOC_START, VMALLOC_END) range. These mappings start at
 * KMSAN_VMALLOC_SHADOW_START and KMSAN_VMALLOC_ORIGIN_START and
 * have to be synced together with the vmalloc memory mapping.
 */
if (start >= VMALLOC_START && end < VMALLOC_END) {
/* Shadow mapping: same offset within the shadow region. */
__arch_sync_kernel_mappings(
start - VMALLOC_START + KMSAN_VMALLOC_SHADOW_START,
end - VMALLOC_START + KMSAN_VMALLOC_SHADOW_START);
/* Origin mapping: same offset within the origin region. */
__arch_sync_kernel_mappings(
start - VMALLOC_START + KMSAN_VMALLOC_ORIGIN_START,
end - VMALLOC_START + KMSAN_VMALLOC_ORIGIN_START);
}
#endif
}
static bool low_pfn(unsigned long pfn)
{
return pfn < max_low_pfn;
......