Commit 996b4a7d authored by Heiko Carstens, committed by Martin Schwidefsky

s390/mem_detect: remove artificial kdump memory types

Simplify the memory detection code a bit by removing the CHUNK_OLDMEM
and CHUNK_CRASHK memory types.
They are not needed. Everything that is needed is a mechanism to
insert holes into the detected memory.
Reviewed-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent d3383632
...@@ -33,8 +33,6 @@ ...@@ -33,8 +33,6 @@
#define CHUNK_READ_WRITE 0 #define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY 1 #define CHUNK_READ_ONLY 1
#define CHUNK_OLDMEM 4
#define CHUNK_CRASHK 5
struct mem_chunk { struct mem_chunk {
unsigned long addr; unsigned long addr;
...@@ -47,8 +45,8 @@ extern int memory_end_set; ...@@ -47,8 +45,8 @@ extern int memory_end_set;
extern unsigned long memory_end; extern unsigned long memory_end;
void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize); void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr, void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
unsigned long size, int type); unsigned long size);
#define PRIMARY_SPACE_MODE 0 #define PRIMARY_SPACE_MODE 0
#define ACCESS_REGISTER_MODE 1 #define ACCESS_REGISTER_MODE 1
......
...@@ -89,7 +89,7 @@ static struct mem_chunk *get_memory_layout(void) ...@@ -89,7 +89,7 @@ static struct mem_chunk *get_memory_layout(void)
chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk)); chunk_array = kzalloc_panic(MEMORY_CHUNKS * sizeof(struct mem_chunk));
detect_memory_layout(chunk_array, 0); detect_memory_layout(chunk_array, 0);
create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE, CHUNK_CRASHK); create_mem_hole(chunk_array, OLDMEM_BASE, OLDMEM_SIZE);
return chunk_array; return chunk_array;
} }
...@@ -344,7 +344,7 @@ static int loads_init(Elf64_Phdr *phdr, u64 loads_offset) ...@@ -344,7 +344,7 @@ static int loads_init(Elf64_Phdr *phdr, u64 loads_offset)
for (i = 0; i < MEMORY_CHUNKS; i++) { for (i = 0; i < MEMORY_CHUNKS; i++) {
mem_chunk = &chunk_array[i]; mem_chunk = &chunk_array[i];
if (mem_chunk->size == 0) if (mem_chunk->size == 0)
break; continue;
if (chunk_array[i].type != CHUNK_READ_WRITE && if (chunk_array[i].type != CHUNK_READ_WRITE &&
chunk_array[i].type != CHUNK_READ_ONLY) chunk_array[i].type != CHUNK_READ_ONLY)
continue; continue;
......
...@@ -463,14 +463,10 @@ static void __init setup_resources(void) ...@@ -463,14 +463,10 @@ static void __init setup_resources(void)
for (i = 0; i < MEMORY_CHUNKS; i++) { for (i = 0; i < MEMORY_CHUNKS; i++) {
if (!memory_chunk[i].size) if (!memory_chunk[i].size)
continue; continue;
if (memory_chunk[i].type == CHUNK_OLDMEM ||
memory_chunk[i].type == CHUNK_CRASHK)
continue;
res = alloc_bootmem_low(sizeof(*res)); res = alloc_bootmem_low(sizeof(*res));
res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
switch (memory_chunk[i].type) { switch (memory_chunk[i].type) {
case CHUNK_READ_WRITE: case CHUNK_READ_WRITE:
case CHUNK_CRASHK:
res->name = "System RAM"; res->name = "System RAM";
break; break;
case CHUNK_READ_ONLY: case CHUNK_READ_ONLY:
...@@ -527,7 +523,7 @@ static void __init setup_memory_end(void) ...@@ -527,7 +523,7 @@ static void __init setup_memory_end(void)
unsigned long align; unsigned long align;
chunk = &memory_chunk[i]; chunk = &memory_chunk[i];
if (chunk->type == CHUNK_OLDMEM) if (!chunk->size)
continue; continue;
align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1); align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
start = (chunk->addr + align - 1) & ~(align - 1); start = (chunk->addr + align - 1) & ~(align - 1);
...@@ -579,7 +575,7 @@ static void __init setup_memory_end(void) ...@@ -579,7 +575,7 @@ static void __init setup_memory_end(void)
for (i = 0; i < MEMORY_CHUNKS; i++) { for (i = 0; i < MEMORY_CHUNKS; i++) {
struct mem_chunk *chunk = &memory_chunk[i]; struct mem_chunk *chunk = &memory_chunk[i];
if (chunk->type == CHUNK_OLDMEM) if (!chunk->size)
continue; continue;
if (chunk->addr >= memory_end) { if (chunk->addr >= memory_end) {
memset(chunk, 0, sizeof(*chunk)); memset(chunk, 0, sizeof(*chunk));
...@@ -680,15 +676,6 @@ static int __init verify_crash_base(unsigned long crash_base, ...@@ -680,15 +676,6 @@ static int __init verify_crash_base(unsigned long crash_base,
return -EINVAL; return -EINVAL;
} }
/*
* Reserve kdump memory by creating a memory hole in the mem_chunk array
*/
static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
int type)
{
create_mem_hole(memory_chunk, addr, size, type);
}
/* /*
* When kdump is enabled, we have to ensure that no memory from * When kdump is enabled, we have to ensure that no memory from
* the area [0 - crashkernel memory size] and * the area [0 - crashkernel memory size] and
...@@ -730,8 +717,8 @@ static void reserve_oldmem(void) ...@@ -730,8 +717,8 @@ static void reserve_oldmem(void)
real_size = max(real_size, chunk->addr + chunk->size); real_size = max(real_size, chunk->addr + chunk->size);
} }
reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM); create_mem_hole(memory_chunk, OLDMEM_BASE, OLDMEM_SIZE);
reserve_kdump_bootmem(OLDMEM_SIZE, real_size - OLDMEM_SIZE, CHUNK_OLDMEM); create_mem_hole(memory_chunk, OLDMEM_SIZE, real_size - OLDMEM_SIZE);
if (OLDMEM_BASE + OLDMEM_SIZE == real_size) if (OLDMEM_BASE + OLDMEM_SIZE == real_size)
saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1; saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
else else
...@@ -774,7 +761,7 @@ static void __init reserve_crashkernel(void) ...@@ -774,7 +761,7 @@ static void __init reserve_crashkernel(void)
crashk_res.start = crash_base; crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1; crashk_res.end = crash_base + crash_size - 1;
insert_resource(&iomem_resource, &crashk_res); insert_resource(&iomem_resource, &crashk_res);
reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK); create_mem_hole(memory_chunk, crash_base, crash_size);
pr_info("Reserving %lluMB of memory at %lluMB " pr_info("Reserving %lluMB of memory at %lluMB "
"for crashkernel (System RAM: %luMB)\n", "for crashkernel (System RAM: %luMB)\n",
crash_size >> 20, crash_base >> 20, memory_end >> 20); crash_size >> 20, crash_base >> 20, memory_end >> 20);
...@@ -846,11 +833,10 @@ static void __init setup_memory(void) ...@@ -846,11 +833,10 @@ static void __init setup_memory(void)
* Register RAM areas with the bootmem allocator. * Register RAM areas with the bootmem allocator.
*/ */
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { for (i = 0; i < MEMORY_CHUNKS; i++) {
unsigned long start_chunk, end_chunk, pfn; unsigned long start_chunk, end_chunk, pfn;
if (memory_chunk[i].type != CHUNK_READ_WRITE && if (!memory_chunk[i].size)
memory_chunk[i].type != CHUNK_CRASHK)
continue; continue;
start_chunk = PFN_DOWN(memory_chunk[i].addr); start_chunk = PFN_DOWN(memory_chunk[i].addr);
end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
......
...@@ -95,82 +95,40 @@ void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize) ...@@ -95,82 +95,40 @@ void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize)
EXPORT_SYMBOL(detect_memory_layout); EXPORT_SYMBOL(detect_memory_layout);
/* /*
* Move memory chunks array from index "from" to index "to" * Create memory hole with given address and size.
*/ */
static void mem_chunk_move(struct mem_chunk chunk[], int to, int from) void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
unsigned long size)
{ {
int cnt = MEMORY_CHUNKS - to; int i;
memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk));
}
/*
* Initialize memory chunk
*/
static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
unsigned long size, int type)
{
chunk->type = type;
chunk->addr = addr;
chunk->size = size;
}
/*
* Create memory hole with given address, size, and type
*/
void create_mem_hole(struct mem_chunk chunk[], unsigned long addr,
unsigned long size, int type)
{
unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size;
int i, ch_type;
for (i = 0; i < MEMORY_CHUNKS; i++) { for (i = 0; i < MEMORY_CHUNKS; i++) {
if (chunk[i].size == 0) struct mem_chunk *chunk = &mem_chunk[i];
continue;
/* Define chunk properties */
ch_start = chunk[i].addr;
ch_size = chunk[i].size;
ch_end = ch_start + ch_size - 1;
ch_type = chunk[i].type;
/* Is memory chunk hit by memory hole? */ if (chunk->size == 0)
if (addr + size <= ch_start) continue;
continue; /* No: memory hole in front of chunk */ if (addr > chunk->addr + chunk->size)
if (addr > ch_end) continue;
continue; /* No: memory hole after chunk */ if (addr + size <= chunk->addr)
continue;
/* Yes: Define local hole properties */ /* Split */
lh_start = max(addr, chunk[i].addr); if ((addr > chunk->addr) &&
lh_end = min(addr + size - 1, ch_end); (addr + size < chunk->addr + chunk->size)) {
lh_size = lh_end - lh_start + 1; struct mem_chunk *new = chunk + 1;
if (lh_start == ch_start && lh_end == ch_end) { memmove(new, chunk, (MEMORY_CHUNKS-i-1) * sizeof(*new));
/* Hole covers complete memory chunk */ new->addr = addr + size;
mem_chunk_init(&chunk[i], lh_start, lh_size, type); new->size = chunk->addr + chunk->size - new->addr;
} else if (lh_end == ch_end) { chunk->size = addr - chunk->addr;
/* Hole starts in memory chunk and convers chunk end */ continue;
mem_chunk_move(chunk, i + 1, i); } else if ((addr <= chunk->addr) &&
mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size, (addr + size >= chunk->addr + chunk->size)) {
ch_type); memset(chunk, 0 , sizeof(*chunk));
mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type); } else if (addr + size < chunk->addr + chunk->size) {
i += 1; chunk->size = chunk->addr + chunk->size - addr - size;
} else if (lh_start == ch_start) { chunk->addr = addr + size;
/* Hole ends in memory chunk */ } else if (addr > chunk->addr) {
mem_chunk_move(chunk, i + 1, i); chunk->size = addr - chunk->addr;
mem_chunk_init(&chunk[i], lh_start, lh_size, type);
mem_chunk_init(&chunk[i + 1], lh_end + 1,
ch_size - lh_size, ch_type);
break;
} else {
/* Hole splits memory chunk */
mem_chunk_move(chunk, i + 2, i);
mem_chunk_init(&chunk[i], ch_start,
lh_start - ch_start, ch_type);
mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
mem_chunk_init(&chunk[i + 2], lh_end + 1,
ch_end - lh_end, ch_type);
break;
} }
} }
} }
...@@ -375,9 +375,8 @@ void __init vmem_map_init(void) ...@@ -375,9 +375,8 @@ void __init vmem_map_init(void)
ro_start = PFN_ALIGN((unsigned long)&_stext); ro_start = PFN_ALIGN((unsigned long)&_stext);
ro_end = (unsigned long)&_eshared & PAGE_MASK; ro_end = (unsigned long)&_eshared & PAGE_MASK;
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { for (i = 0; i < MEMORY_CHUNKS; i++) {
if (memory_chunk[i].type == CHUNK_CRASHK || if (!memory_chunk[i].size)
memory_chunk[i].type == CHUNK_OLDMEM)
continue; continue;
start = memory_chunk[i].addr; start = memory_chunk[i].addr;
end = memory_chunk[i].addr + memory_chunk[i].size; end = memory_chunk[i].addr + memory_chunk[i].size;
...@@ -412,9 +411,6 @@ static int __init vmem_convert_memory_chunk(void) ...@@ -412,9 +411,6 @@ static int __init vmem_convert_memory_chunk(void)
for (i = 0; i < MEMORY_CHUNKS; i++) { for (i = 0; i < MEMORY_CHUNKS; i++) {
if (!memory_chunk[i].size) if (!memory_chunk[i].size)
continue; continue;
if (memory_chunk[i].type == CHUNK_CRASHK ||
memory_chunk[i].type == CHUNK_OLDMEM)
continue;
seg = kzalloc(sizeof(*seg), GFP_KERNEL); seg = kzalloc(sizeof(*seg), GFP_KERNEL);
if (!seg) if (!seg)
panic("Out of memory...\n"); panic("Out of memory...\n");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment