Commit 07626590 authored by Alastair D'Silva's avatar Alastair D'Silva Committed by Michael Ellerman

powerpc: Chunk calls to flush_dcache_range in arch_*_memory

When presented with large amounts of memory being hotplugged
(in my test case, ~890GB), the call to flush_dcache_range takes
a while (~50 seconds), triggering RCU stalls.

This patch breaks up the call into 1GB chunks, calling
cond_resched() in between to allow the scheduler to run.

Fixes: fb5924fd ("powerpc/mm: Flush cache on memory hot(un)plug")
Signed-off-by: Alastair D'Silva <alastair@d-silva.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20191104023305.9581-6-alastair@au1.ibm.com
parent 23eb7f56
...@@ -104,6 +104,27 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end) ...@@ -104,6 +104,27 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
return -ENODEV; return -ENODEV;
} }
#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long i;

	for (i = start; i < stop; i += chunk) {
		/*
		 * The chunk's upper bound must be relative to the loop
		 * cursor i, not to start: with "start + chunk" every
		 * iteration after the first would flush an empty range,
		 * leaving most of the region unflushed.
		 */
		flush_dcache_range(i, min(stop, i + chunk));
		/* Yield between chunks so large regions don't trigger RCU stalls. */
		cond_resched();
	}
}
int __ref arch_add_memory(int nid, u64 start, u64 size, int __ref arch_add_memory(int nid, u64 start, u64 size,
struct mhp_restrictions *restrictions) struct mhp_restrictions *restrictions)
{ {
...@@ -120,7 +141,8 @@ int __ref arch_add_memory(int nid, u64 start, u64 size, ...@@ -120,7 +141,8 @@ int __ref arch_add_memory(int nid, u64 start, u64 size,
start, start + size, rc); start, start + size, rc);
return -EFAULT; return -EFAULT;
} }
flush_dcache_range(start, start + size);
flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);
return __add_pages(nid, start_pfn, nr_pages, restrictions); return __add_pages(nid, start_pfn, nr_pages, restrictions);
} }
...@@ -137,7 +159,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size, ...@@ -137,7 +159,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
/* Remove htab bolted mappings for this section of memory */ /* Remove htab bolted mappings for this section of memory */
start = (unsigned long)__va(start); start = (unsigned long)__va(start);
flush_dcache_range(start, start + size); flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);
ret = remove_section_mapping(start, start + size); ret = remove_section_mapping(start, start + size);
WARN_ON_ONCE(ret); WARN_ON_ONCE(ret);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment