Commit faf1c000 authored by Oscar Salvador, committed by Linus Torvalds

x86/vmemmap: optimize for consecutive sections in partial populated PMDs

We can optimize the case where consecutive sections are being added, since
then no memset(PAGE_UNUSED) is needed.

In that case, let us keep track of where the unused part of the previous
memory range begins, so we can compare it with the start of the range to be
added.  If they are equal, we know the sections are added consecutively.

For that purpose, let us introduce 'unused_pmd_start', which always holds
the beginning of the unused memory range.

In case a section does not contiguously follow the previous one, we know we
can memset [unused_pmd_start, PMD_BOUNDARY) with PAGE_UNUSED.
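
As an illustration only (this is not the kernel code; PMD_SIZE, the example
addresses, and the helper names flush_unused()/use_range() are made up, and
the real implementation below splits the logic across vmemmap_use_sub_pmd()
and vmemmap_use_new_sub_pmd()), the bookkeeping can be sketched as a small
userspace program: remember where the unused tail of the last range begins,
and only fall back to the memset(PAGE_UNUSED) when the next range does not
start exactly there.

  #include <stdio.h>

  #define PMD_SIZE         0x200000UL  /* 2 MiB, as with x86-64 huge PMDs */
  #define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
  #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

  static unsigned long unused_pmd_start;  /* 0 means "nothing cached" */

  /* Stand-in for the memset(PAGE_UNUSED) of the cached unused tail. */
  static void flush_unused(void)
  {
          if (!unused_pmd_start)
                  return;
          printf("memset PAGE_UNUSED over [%#lx, %#lx)\n",
                 unused_pmd_start, ALIGN_UP(unused_pmd_start, PMD_SIZE));
          unused_pmd_start = 0;
  }

  /* Simplified decision logic when a new vmemmap range [start, end) is used. */
  static void use_range(unsigned long start, unsigned long end)
  {
          if (unused_pmd_start == start) {
                  /* Consecutive with the previous range: no memset needed. */
                  unused_pmd_start = IS_ALIGNED(end, PMD_SIZE) ? 0 : end;
                  return;
          }
          /* Not consecutive: flush the previously cached unused tail. */
          flush_unused();
          if (!IS_ALIGNED(end, PMD_SIZE))
                  unused_pmd_start = end;
  }

  int main(void)
  {
          use_range(0x200000, 0x300000); /* first half of a PMD; cache 0x300000 */
          use_range(0x300000, 0x400000); /* consecutive: no memset, PMD now full */
          use_range(0x600000, 0x700000); /* nothing cached; cache 0x700000 */
          use_range(0xa00000, 0xb00000); /* not consecutive: memset [0x700000, 0x800000) */
          return 0;
  }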

This patch is based on a similar patch by David Hildenbrand:

https://lore.kernel.org/linux-mm/20200722094558.9828-10-david@redhat.com/

Link: https://lkml.kernel.org/r/20210309214050.4674-5-osalvador@suse.de
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8d400913
@@ -829,17 +829,42 @@ void __init paging_init(void)
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 #define PAGE_UNUSED 0xFD
 
+/*
+ * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
+ * from unused_pmd_start to next PMD_SIZE boundary.
+ */
+static unsigned long unused_pmd_start __meminitdata;
+
+static void __meminit vmemmap_flush_unused_pmd(void)
+{
+        if (!unused_pmd_start)
+                return;
+        /*
+         * Clears (unused_pmd_start, PMD_END]
+         */
+        memset((void *)unused_pmd_start, PAGE_UNUSED,
+               ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
+        unused_pmd_start = 0;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
 /* Returns true if the PMD is completely unused and thus it can be freed */
 static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
 {
         unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);
 
+        /*
+         * Flush the unused range cache to ensure that memchr_inv() will work
+         * for the whole range.
+         */
+        vmemmap_flush_unused_pmd();
         memset((void *)addr, PAGE_UNUSED, end - addr);
 
         return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE);
 }
+#endif
 
-static void __meminit vmemmap_use_sub_pmd(unsigned long start)
+static void __meminit __vmemmap_use_sub_pmd(unsigned long start)
 {
         /*
          * As we expect to add in the same granularity as we remove, it's
@@ -851,13 +876,38 @@ static void __meminit vmemmap_use_sub_pmd(unsigned long start)
         memset((void *)start, 0, sizeof(struct page));
 }
 
+static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
+{
+        /*
+         * We only optimize if the new used range directly follows the
+         * previously unused range (esp., when populating consecutive sections).
+         */
+        if (unused_pmd_start == start) {
+                if (likely(IS_ALIGNED(end, PMD_SIZE)))
+                        unused_pmd_start = 0;
+                else
+                        unused_pmd_start = end;
+                return;
+        }
+
+        /*
+         * If the range does not contiguously follows previous one, make sure
+         * to mark the unused range of the previous one so it can be removed.
+         */
+        vmemmap_flush_unused_pmd();
+        __vmemmap_use_sub_pmd(start);
+}
+
+
 static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
 {
+        vmemmap_flush_unused_pmd();
+
         /*
          * Could be our memmap page is filled with PAGE_UNUSED already from a
          * previous remove. Make sure to reset it.
          */
-        vmemmap_use_sub_pmd(start);
+        __vmemmap_use_sub_pmd(start);
 
         /*
          * Mark with PAGE_UNUSED the unused parts of the new memmap range
@@ -865,9 +915,14 @@ static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
         if (!IS_ALIGNED(start, PMD_SIZE))
                 memset((void *)start, PAGE_UNUSED,
                        start - ALIGN_DOWN(start, PMD_SIZE));
+
+        /*
+         * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
+         * consecutive sections. Remember for the last added PMD where the
+         * unused range begins.
+         */
         if (!IS_ALIGNED(end, PMD_SIZE))
-                memset((void *)end, PAGE_UNUSED,
-                       ALIGN(end, PMD_SIZE) - end);
+                unused_pmd_start = end;
 }
 #endif
@@ -1538,7 +1593,7 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
                                 return -ENOMEM; /* no fallback */
                 } else if (pmd_large(*pmd)) {
                         vmemmap_verify((pte_t *)pmd, node, addr, next);
-                        vmemmap_use_sub_pmd(addr);
+                        vmemmap_use_sub_pmd(addr, next);
                         continue;
                 }
                 if (vmemmap_populate_basepages(addr, next, node, NULL))