Commit 4a03b2ac authored by Youling Tang, committed by Huacai Chen

LoongArch: Add kexec support

Add three new files, kexec.h, machine_kexec.c and relocate_kernel.S, to
the LoongArch architecture to support the kexec reboot mechanism
(CONFIG_KEXEC) on LoongArch platforms.

Kexec supports loading vmlinux.elf in ELF format and vmlinux.efi in PE
format.

I tested kexec on LoongArch machines (Loongson-3A5000) and it works as
expected:

 $ sudo kexec -l /boot/vmlinux.efi --reuse-cmdline
 $ sudo kexec -e
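
Loading the ELF image works the same way; assuming it is installed as
/boot/vmlinux.elf (the path here is illustrative, not part of the tested
setup), the equivalent invocation would be:

 $ sudo kexec -l /boot/vmlinux.elf --reuse-cmdline
 $ sudo kexec -e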
Signed-off-by: Youling Tang <tangyouling@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
parent 2d2c3952
@@ -426,6 +426,17 @@ config ARCH_IOREMAP
	  protection support. However, you can enable LoongArch DMW-based
	  ioremap() for better performance.

config KEXEC
	bool "Kexec system call"
	select KEXEC_CORE
	help
	  kexec is a system call that implements the ability to shutdown your
	  current kernel, and to start another kernel. It is like a reboot
	  but it is independent of the system firmware. And like a reboot
	  you can start any kernel with it, not just Linux.

	  The name comes from the similarity to the exec system call.

config SECCOMP
	bool "Enable seccomp to safely compute untrusted bytecode"
	depends on PROC_FS
......
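The help text above describes kexec as a system call. User space normally
drives it through kexec-tools, as in the test commands in the commit
message, but the raw interface can also be exercised directly. The
following minimal sketch (illustrative only) calls kexec_load() with zero
segments, which simply unloads any previously loaded image:

 #include <stdio.h>
 #include <unistd.h>
 #include <sys/syscall.h>
 #include <linux/kexec.h>

 int main(void)
 {
 	/* entry = 0, nr_segments = 0: unload any loaded kexec image. */
 	if (syscall(SYS_kexec_load, 0UL, 0UL, NULL, KEXEC_ARCH_DEFAULT) < 0)
 		perror("kexec_load");	/* typically EPERM without CAP_SYS_BOOT */
 	return 0;
 }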
/* SPDX-License-Identifier: GPL-2.0 */
/*
* kexec.h for kexec
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#ifndef _ASM_KEXEC_H
#define _ASM_KEXEC_H
#include <asm/stacktrace.h>
#include <asm/page.h>
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
/* Reserve a page for the control code buffer */
#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_LOONGARCH
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
if (oldregs)
memcpy(newregs, oldregs, sizeof(*newregs));
else
prepare_frametrace(newregs);
}
#define ARCH_HAS_KIMAGE_ARCH
struct kimage_arch {
unsigned long efi_boot;
unsigned long cmdline_ptr;
unsigned long systable_ptr;
};
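/*
 * Signature of the relocated boot code (relocate_new_kernel): the five
 * arguments are passed in registers a0-a4, matching the register list
 * documented at the top of relocate_kernel.S.
 */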
typedef void (*do_kexec_t)(unsigned long efi_boot,
unsigned long cmdline_ptr,
unsigned long systable_ptr,
unsigned long start_addr,
unsigned long first_ind_entry);
struct kimage;
extern const unsigned char relocate_new_kernel[];
extern const size_t relocate_new_kernel_size;
extern void kexec_reboot(void);
#ifdef CONFIG_SMP
extern atomic_t kexec_ready_to_reboot;
extern const unsigned char kexec_smp_wait[];
#endif
#endif /* !_ASM_KEXEC_H */
@@ -25,6 +25,8 @@ obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
......
@@ -21,7 +21,11 @@
_head:
.word MZ_MAGIC /* "MZ", MS-DOS header */
.org 0x8
.dword kernel_entry /* Kernel entry point */
.dword _end - _text /* Kernel image effective size */
.quad 0 /* Kernel image load offset from start of RAM */
.org 0x3c /* 0x20 ~ 0x3b reserved */
.long pe_header - _head /* Offset to the PE header */
pe_header:
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* machine_kexec.c for kexec
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <asm/bootinfo.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
/* 0x100000 ~ 0x200000 is safe */
#define KEXEC_CONTROL_CODE TO_CACHE(0x100000UL)
#define KEXEC_CMDLINE_ADDR TO_CACHE(0x108000UL)
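/*
 * Resulting layout of the reserved window (cached virtual addresses):
 *   TO_CACHE(0x100000): copy of relocate_new_kernel (control code buffer)
 *   TO_CACHE(0x108000): saved command line for the new kernel
 */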
static unsigned long reboot_code_buffer;
#ifdef CONFIG_SMP
static void (*relocated_kexec_smp_wait)(void *);
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
#endif
static unsigned long efi_boot;
static unsigned long cmdline_ptr;
static unsigned long systable_ptr;
static unsigned long start_addr;
static unsigned long first_ind_entry;
static void kexec_image_info(const struct kimage *kimage)
{
unsigned long i;
pr_debug("kexec kimage info:\n");
pr_debug("\ttype: %d\n", kimage->type);
pr_debug("\tstart: %lx\n", kimage->start);
pr_debug("\thead: %lx\n", kimage->head);
pr_debug("\tnr_segments: %lu\n", kimage->nr_segments);
for (i = 0; i < kimage->nr_segments; i++) {
pr_debug("\t segment[%lu]: %016lx - %016lx", i,
kimage->segment[i].mem,
kimage->segment[i].mem + kimage->segment[i].memsz);
pr_debug("\t\t0x%lx bytes, %lu pages\n",
(unsigned long)kimage->segment[i].memsz,
(unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
}
}
int machine_kexec_prepare(struct kimage *kimage)
{
int i;
char *bootloader = "kexec";
void *cmdline_ptr = (void *)KEXEC_CMDLINE_ADDR;
kexec_image_info(kimage);
kimage->arch.efi_boot = fw_arg0;
kimage->arch.systable_ptr = fw_arg2;
/* Find the command line */
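/*
 * As on MIPS, user space (kexec-tools) is expected to pass the command
 * line in a dedicated segment whose contents begin with the bootloader
 * name "kexec"; that prefix is what the strncmp() below matches.
 */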
for (i = 0; i < kimage->nr_segments; i++) {
if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) {
if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE))
kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr;
break;
}
}
if (!kimage->arch.cmdline_ptr) {
pr_err("Command line not included in the provided image\n");
return -EINVAL;
}
/* kexec needs a safe page to hold the reboot code buffer */
kimage->control_code_page = virt_to_page((void *)KEXEC_CONTROL_CODE);
reboot_code_buffer = (unsigned long)page_address(kimage->control_code_page);
memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);
#ifdef CONFIG_SMP
/* All secondary CPUs may now jump to the kexec_smp_wait cycle */
relocated_kexec_smp_wait = reboot_code_buffer + (void *)(kexec_smp_wait - relocate_new_kernel);
#endif
return 0;
}
void machine_kexec_cleanup(struct kimage *kimage)
{
}
void kexec_reboot(void)
{
do_kexec_t do_kexec = NULL;
/*
* We know we were online, and there will be no incoming IPIs at
* this point.
*/
set_cpu_online(smp_processor_id(), true);
/* Ensure remote CPUs observe that we're online before rebooting. */
smp_mb__after_atomic();
/*
* Make sure we get correct instructions written by the
* machine_kexec_prepare() CPU.
*/
__asm__ __volatile__ ("\tibar 0\n"::);
#ifdef CONFIG_SMP
/* All secondary CPUs go to kexec_smp_wait */
if (smp_processor_id() > 0) {
relocated_kexec_smp_wait(NULL);
unreachable();
}
#endif
do_kexec = (void *)reboot_code_buffer;
do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);
unreachable();
}
#ifdef CONFIG_SMP
static void kexec_shutdown_secondary(void *regs)
{
int cpu = smp_processor_id();
if (!cpu_online(cpu))
return;
/* We won't be sent IPIs any more. */
set_cpu_online(cpu, false);
local_irq_disable();
while (!atomic_read(&kexec_ready_to_reboot))
cpu_relax();
kexec_reboot();
}
#endif
void machine_shutdown(void)
{
int cpu;
/* Bring offline CPUs up so that all of them can go to reboot_code_buffer */
for_each_possible_cpu(cpu)
if (!cpu_online(cpu))
cpu_device_up(get_cpu_device(cpu));
#ifdef CONFIG_SMP
smp_call_function(kexec_shutdown_secondary, NULL, 0);
#endif
}
void machine_crash_shutdown(struct pt_regs *regs)
{
}
void machine_kexec(struct kimage *image)
{
unsigned long entry, *ptr;
struct kimage_arch *internal = &image->arch;
efi_boot = internal->efi_boot;
cmdline_ptr = internal->cmdline_ptr;
systable_ptr = internal->systable_ptr;
start_addr = (unsigned long)phys_to_virt(image->start);
first_ind_entry = (unsigned long)phys_to_virt(image->head & PAGE_MASK);
/*
* The generic kexec code builds a page list with physical
* addresses. They are directly accessible through XKPRANGE,
* hence the phys_to_virt() call.
*/
for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
ptr = (entry & IND_INDIRECTION) ?
phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
*ptr & IND_DESTINATION)
*ptr = (unsigned long) phys_to_virt(*ptr);
}
/* Mark offline before disabling local irq. */
set_cpu_online(smp_processor_id(), false);
/* We do not want to be bothered. */
local_irq_disable();
pr_notice("EFI boot flag 0x%lx\n", efi_boot);
pr_notice("Command line at 0x%lx\n", cmdline_ptr);
pr_notice("System table at 0x%lx\n", systable_ptr);
pr_notice("We will call new kernel at 0x%lx\n", start_addr);
pr_notice("Bye ...\n");
/* Make reboot code buffer available to the boot CPU. */
flush_cache_all();
#ifdef CONFIG_SMP
atomic_set(&kexec_ready_to_reboot, 1);
#endif
kexec_reboot();
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* relocate_kernel.S for kexec
*
* Copyright (C) 2022 Loongson Technology Corporation Limited
*/
#include <linux/kexec.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/loongarch.h>
#include <asm/stackframe.h>
#include <asm/addrspace.h>
SYM_CODE_START(relocate_new_kernel)
/*
* a0: EFI boot flag for the new kernel
* a1: Command line pointer for the new kernel
* a2: System table pointer for the new kernel
* a3: Start address to jump to after relocation
* a4: Pointer to the current indirection page entry
*/
move s0, a4
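/*
* Each list entry is a page address (already converted to a cached
* virtual address by machine_kexec()) tagged in its low bits with the
* generic kexec flags from include/linux/kexec.h:
*   IND_DESTINATION = 0x1, IND_INDIRECTION = 0x2,
*   IND_DONE = 0x4, IND_SOURCE = 0x8
* The ~0x1, ~0x2 and ~0x8 masks below strip the flag bit to recover
* the address.
*/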
process_entry:
PTR_L s1, s0, 0
PTR_ADDI s0, s0, SZREG
/* destination page */
andi s2, s1, IND_DESTINATION
beqz s2, 1f
li.w t0, ~0x1
and s3, s1, t0 /* store destination addr in s3 */
b process_entry
1:
/* indirection page, update s0 */
andi s2, s1, IND_INDIRECTION
beqz s2, 1f
li.w t0, ~0x2
and s0, s1, t0
b process_entry
1:
/* done page */
andi s2, s1, IND_DONE
beqz s2, 1f
b done
1:
/* source page */
andi s2, s1, IND_SOURCE
beqz s2, process_entry
li.w t0, ~0x8
and s1, s1, t0
li.w s5, (1 << _PAGE_SHIFT) / SZREG
copy_word:
/* copy page word by word */
REG_L s4, s1, 0
REG_S s4, s3, 0
PTR_ADDI s3, s3, SZREG
PTR_ADDI s1, s1, SZREG
LONG_ADDI s5, s5, -1
beqz s5, process_entry
b copy_word
b process_entry
done:
ibar 0
dbar 0
/*
* Jump to the new kernel,
* make sure the values of a0, a1, a2 and a3 are not changed.
*/
jr a3
SYM_CODE_END(relocate_new_kernel)
#ifdef CONFIG_SMP
/*
* Other CPUs should wait until code is relocated and
* then start at the entry point from LOONGARCH_IOCSR_MBUF0.
*/
SYM_CODE_START(kexec_smp_wait)
1: li.w t0, 0x100 /* wait for init loop */
2: addi.w t0, t0, -1 /* limit mailbox access */
bnez t0, 2b
li.w t1, LOONGARCH_IOCSR_MBUF0
iocsrrd.w s0, t1 /* check PC as an indicator */
beqz s0, 1b
iocsrrd.d s0, t1 /* get PC via mailbox */
li.d t0, CACHE_BASE
or s0, s0, t0 /* s0 = TO_CACHE(s0) */
jr s0 /* jump to initial PC */
SYM_CODE_END(kexec_smp_wait)
#endif
relocate_new_kernel_end:
SYM_DATA_START(relocate_new_kernel_size)
PTR relocate_new_kernel_end - relocate_new_kernel
SYM_DATA_END(relocate_new_kernel_size)