Commit 39365395 authored by Baoquan He, committed by Andrew Morton

riscv: kdump: use generic interface to simplify crashkernel reservation

With the help of the newly changed parse_crashkernel() and the generic
reserve_crashkernel_generic(), crashkernel reservation can be simplified
in the following steps:

1) Add a new header file <asm/crash_core.h>, and define CRASH_ALIGN,
   CRASH_ADDR_LOW_MAX, CRASH_ADDR_HIGH_MAX and
   DEFAULT_CRASH_KERNEL_LOW_SIZE in <asm/crash_core.h>;

2) Add arch_reserve_crashkernel() to call parse_crashkernel() and
   reserve_crashkernel_generic();

3) Add ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION Kconfig in
   arch/riscv/Kconfig.

The old reserve_crashkernel_low() and reserve_crashkernel() can be
removed.

[chenjiahao16@huawei.com: fix crashkernel reserving problem on RISC-V]
  Link: https://lkml.kernel.org/r/20230925024333.730964-1-chenjiahao16@huawei.com
Link: https://lkml.kernel.org/r/20230914033142.676708-9-bhe@redhat.com
Signed-off-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Chen Jiahao <chenjiahao16@huawei.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chen Jiahao <chenjiahao16@huawei.com>
Cc: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent fdc26823
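
At a glance, the new arch hook is a thin wrapper: parse_crashkernel() now fills in the requested size and base (plus the optional ",low" size and the ",high" flag), and reserve_crashkernel_generic() performs the actual reservation using CRASH_ALIGN and CRASH_ADDR_{LOW,HIGH}_MAX from <asm/crash_core.h>. Below is a condensed sketch of the resulting flow, mirroring the last hunk of the diff; the comments are editorial, not part of the patch.

/* Condensed sketch of the arch hook introduced by this commit. */
static void __init arch_reserve_crashkernel(void)
{
	unsigned long long low_size = 0;
	unsigned long long crash_base, crash_size;
	char *cmdline = boot_command_line;
	bool high = false;
	int ret;

	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
		return;

	/* One call now handles crashkernel=X[@offset], =X,high and =Y,low. */
	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
				&crash_size, &crash_base, &low_size, &high);
	if (ret)
		return;

	/* Generic code searches memblock and sets up crashk_res/crashk_low_res. */
	reserve_crashkernel_generic(cmdline, crash_size, crash_base,
				    low_size, high);
}

With ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION selected, the high/low fallback logic that used to live in arch code is handled once by the generic reserve_crashkernel_generic().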
@@ -694,6 +694,9 @@ config ARCH_SUPPORTS_KEXEC_PURGATORY
 config ARCH_SUPPORTS_CRASH_DUMP
 	def_bool y
 
+config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+	def_bool CRASH_CORE
+
 config COMPAT
 	bool "Kernel support for 32-bit U-mode"
 	default 64BIT
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _RISCV_CRASH_CORE_H
+#define _RISCV_CRASH_CORE_H
+
+#define CRASH_ALIGN			PMD_SIZE
+
+#define CRASH_ADDR_LOW_MAX		dma32_phys_limit
+#define CRASH_ADDR_HIGH_MAX		memblock_end_of_DRAM()
+
+extern phys_addr_t memblock_end_of_DRAM(void);
+#endif
@@ -116,6 +116,8 @@ static inline void wait_for_interrupt(void)
 	__asm__ __volatile__ ("wfi");
 }
 
+extern phys_addr_t dma32_phys_limit;
+
 struct device_node;
 int riscv_of_processor_hartid(struct device_node *node, unsigned long *hartid);
 int riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hartid);
@@ -173,19 +173,6 @@ static void __init init_resources(void)
 	if (ret < 0)
 		goto error;
 
-#ifdef CONFIG_KEXEC_CORE
-	if (crashk_res.start != crashk_res.end) {
-		ret = add_resource(&iomem_resource, &crashk_res);
-		if (ret < 0)
-			goto error;
-	}
-
-	if (crashk_low_res.start != crashk_low_res.end) {
-		ret = add_resource(&iomem_resource, &crashk_low_res);
-		if (ret < 0)
-			goto error;
-	}
-#endif
-
 #ifdef CONFIG_CRASH_DUMP
 	if (elfcorehdr_size > 0) {
 		elfcorehdr_res.start = elfcorehdr_addr;
@@ -65,7 +65,7 @@ extern char _start[];
 void *_dtb_early_va __initdata;
 uintptr_t _dtb_early_pa __initdata;
 
-static phys_addr_t dma32_phys_limit __initdata;
+phys_addr_t dma32_phys_limit __initdata;
 
 static void __init zone_sizes_init(void)
 {
@@ -1333,28 +1333,6 @@ static inline void setup_vm_final(void)
 }
 #endif /* CONFIG_MMU */
 
-/* Reserve 128M low memory by default for swiotlb buffer */
-#define DEFAULT_CRASH_KERNEL_LOW_SIZE	(128UL << 20)
-
-static int __init reserve_crashkernel_low(unsigned long long low_size)
-{
-	unsigned long long low_base;
-
-	low_base = memblock_phys_alloc_range(low_size, PMD_SIZE, 0, dma32_phys_limit);
-	if (!low_base) {
-		pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size);
-		return -ENOMEM;
-	}
-
-	pr_info("crashkernel low memory reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
-		low_base, low_base + low_size, low_size >> 20);
-
-	crashk_low_res.start = low_base;
-	crashk_low_res.end = low_base + low_size - 1;
-
-	return 0;
-}
-
 /*
  * reserve_crashkernel() - reserves memory for crash kernel
  *
@@ -1362,122 +1340,25 @@ static int __init reserve_crashkernel_low(unsigned long long low_size)
  * line parameter. The memory reserved is used by dump capture kernel when
  * primary kernel is crashing.
  */
-static void __init reserve_crashkernel(void)
+static void __init arch_reserve_crashkernel(void)
 {
-	unsigned long long crash_base = 0;
-	unsigned long long crash_size = 0;
-	unsigned long long crash_low_size = 0;
-	unsigned long search_start = memblock_start_of_DRAM();
-	unsigned long search_end = (unsigned long)dma32_phys_limit;
+	unsigned long long low_size = 0;
+	unsigned long long crash_base, crash_size;
 	char *cmdline = boot_command_line;
-	bool fixed_base = false;
 	bool high = false;
-	int ret = 0;
+	int ret;
 
 	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
 		return;
-	/*
-	 * Don't reserve a region for a crash kernel on a crash kernel
-	 * since it doesn't make much sense and we have limited memory
-	 * resources.
-	 */
-	if (is_kdump_kernel()) {
-		pr_info("crashkernel: ignoring reservation request\n");
-		return;
-	}
 
 	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
-				&crash_size, &crash_base, NULL, NULL);
-	if (ret == -ENOENT) {
-		/* Fallback to crashkernel=X,[high,low] */
-		ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base);
-		if (ret || !crash_size)
-			return;
-
-		/*
-		 * crashkernel=Y,low is valid only when crashkernel=X,high
-		 * is passed.
-		 */
-		ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base);
-		if (ret == -ENOENT)
-			crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
-		else if (ret)
-			return;
-
-		search_start = (unsigned long)dma32_phys_limit;
-		search_end = memblock_end_of_DRAM();
-		high = true;
-	} else if (ret || !crash_size) {
-		/* Invalid argument value specified */
-		return;
-	}
-
-	crash_size = PAGE_ALIGN(crash_size);
-
-	if (crash_base) {
-		fixed_base = true;
-		search_start = crash_base;
-		search_end = crash_base + crash_size;
-	}
-
-	/*
-	 * Current riscv boot protocol requires 2MB alignment for
-	 * RV64 and 4MB alignment for RV32 (hugepage size)
-	 *
-	 * Try to alloc from 32bit addressible physical memory so that
-	 * swiotlb can work on the crash kernel.
-	 */
-	crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
-					       search_start, search_end);
-	if (crash_base == 0) {
-		/*
-		 * For crashkernel=size[KMG]@offset[KMG], print out failure
-		 * message if can't reserve the specified region.
-		 */
-		if (fixed_base) {
-			pr_warn("crashkernel: allocating failed with given size@offset\n");
-			return;
-		}
-
-		if (high) {
-			/*
-			 * For crashkernel=size[KMG],high, if the first attempt was
-			 * for high memory, fall back to low memory.
-			 */
-			search_start = memblock_start_of_DRAM();
-			search_end = (unsigned long)dma32_phys_limit;
-		} else {
-			/*
-			 * For crashkernel=size[KMG], if the first attempt was for
-			 * low memory, fall back to high memory, the minimum required
-			 * low memory will be reserved later.
-			 */
-			search_start = (unsigned long)dma32_phys_limit;
-			search_end = memblock_end_of_DRAM();
-			crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
-		}
-
-		crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
-						       search_start, search_end);
-		if (crash_base == 0) {
-			pr_warn("crashkernel: couldn't allocate %lldKB\n",
-				crash_size >> 10);
+				&crash_size, &crash_base,
+				&low_size, &high);
+	if (ret)
 		return;
-		}
-	}
-
-	if ((crash_base >= dma32_phys_limit) && crash_low_size &&
-	     reserve_crashkernel_low(crash_low_size)) {
-		memblock_phys_free(crash_base, crash_size);
-		return;
-	}
-
-	pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
-		crash_base, crash_base + crash_size, crash_size >> 20);
-
-	crashk_res.start = crash_base;
-	crashk_res.end = crash_base + crash_size - 1;
+
+	reserve_crashkernel_generic(cmdline, crash_size, crash_base,
+				    low_size, high);
 }
 
 void __init paging_init(void)
@@ -1495,7 +1376,7 @@ void __init misc_mem_init(void)
 	arch_numa_init();
 	sparse_init();
 	zone_sizes_init();
-	reserve_crashkernel();
+	arch_reserve_crashkernel();
 	memblock_dump_all();
 }