Commit 9da5ac23 authored by Russell King, committed by Christoffer Dall

ARM: soft-reboot into same mode that we entered the kernel

When we soft-reboot (e.g. kexec) from one kernel into the next, we need
to ensure that we enter the new kernel in the same processor mode in
which the current kernel was entered, so that (for example) the new
kernel can install its own hypervisor: the old kernel's hypervisor will
have been overwritten.

In order to do this, we need to pass a flag to cpu_reset() so it knows
what to do, and we need to modify the kernel's own hypervisor stub to
allow it to handle a soft-reboot. (A C sketch of the new cpu_reset()
contract follows the commit header below.)

As we are always guaranteed to have installed our own hypervisor stub
if we entered in HYP32 mode, and KVM will have moved itself out of the
way on a kexec/normal reboot, we can assume that our hypervisor stub is
in place when we want to kexec, so changing our hypervisor API should
not be a problem.
Tested-by: Keerthy <j-keerthy@ti.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <cdall@linaro.org>
parent 1342337b
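Before the diff itself, a minimal C sketch of the call contract this commit introduces. It mirrors the reboot.c hunk below, but it is an illustration only; the extern declarations are stand-ins for the real kernel symbols, compiled out of context:

#include <stdbool.h>

/* cpu_reset() and the per-CPU reset hook now take a second argument:
 * true means "soft-restart via the hypervisor stub (HVC)", false
 * means a plain SVC-mode restart. */
typedef void (*phys_reset_t)(unsigned long addr, bool hvc);

/* Stand-ins for the real kernel symbols (illustration only). */
extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
extern bool is_hyp_mode_available(void);
extern void *virt_to_idmap(void *p);

static void __soft_restart(void *addr)
{
	phys_reset_t phys_reset;

	/* Call cpu_reset() through the identity mapping so the MMU
	 * can be turned off underneath us, and tell it whether this
	 * CPU was entered in HYP mode. */
	phys_reset = (phys_reset_t)virt_to_idmap(cpu_reset);
	phys_reset((unsigned long)addr, is_hyp_mode_available());
}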
arch/arm/include/asm/proc-fns.h
@@ -43,7 +43,7 @@ extern struct processor {
 	/*
 	 * Special stuff for a reset
 	 */
-	void (*reset)(unsigned long addr) __attribute__((noreturn));
+	void (*reset)(unsigned long addr, bool hvc) __attribute__((noreturn));
 	/*
 	 * Idle the processor
 	 */
@@ -88,7 +88,7 @@ extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
 #else
 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
 #endif
-extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
 
 /* These three are private to arch/arm/kernel/suspend.c */
 extern void cpu_do_suspend(void *);
arch/arm/kernel/hyp-stub.S
@@ -24,6 +24,7 @@
 
 #define HVC_GET_VECTORS 0
 #define HVC_SET_VECTORS 1
+#define HVC_SOFT_RESTART 2
 
 #ifndef ZIMAGE
 /*
@@ -215,6 +216,10 @@ __hyp_stub_do_trap:
 	mcr	p15, 4, r1, c12, c0, 0	@ set HVBAR
 	b	__hyp_stub_exit
 
+1:	teq	r0, #HVC_SOFT_RESTART
+	bne	1f
+	bx	r3
+
 1:	mov	r0, #-1
 
 __hyp_stub_exit:
@@ -256,6 +261,14 @@ ENTRY(__hyp_set_vectors)
 	ret	lr
 ENDPROC(__hyp_set_vectors)
 
+ENTRY(__hyp_soft_restart)
+	mov	r3, r0
+	mov	r0, #HVC_SOFT_RESTART
+	__HVC(0)
+	mov	r0, r3
+	ret	lr
+ENDPROC(__hyp_soft_restart)
+
 #ifndef ZIMAGE
 .align 2
 .L__boot_cpu_mode_offset:
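The hyp-stub change above is a compare-and-dispatch on the function code the caller leaves in r0, with the soft-restart target in r3. A plain-C model of that dispatch logic, with hypothetical names (the real code is the assembly above):

#include <stdint.h>

#define HVC_GET_VECTORS		0
#define HVC_SET_VECTORS		1
#define HVC_SOFT_RESTART	2

typedef void (*restart_fn_t)(void);

static uintptr_t hvbar;			/* models the HVBAR register */

/* Models __hyp_stub_do_trap: r0 carries the function code, r1 a
 * vector base for HVC_SET_VECTORS, r3 the soft-restart target. */
static intptr_t hyp_stub_do_trap(intptr_t r0, uintptr_t r1, restart_fn_t r3)
{
	if (r0 == HVC_GET_VECTORS)
		return (intptr_t)hvbar;	/* mrc ... @ get HVBAR */
	if (r0 == HVC_SET_VECTORS) {
		hvbar = r1;		/* mcr ... @ set HVBAR */
		return 0;
	}
	if (r0 == HVC_SOFT_RESTART)
		r3();			/* bx r3: jump to target, still in HYP */
	return -1;			/* unknown function code: r0 = -1 */
}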
arch/arm/kernel/reboot.c
@@ -12,10 +12,11 @@
 
 #include <asm/cacheflush.h>
 #include <asm/idmap.h>
+#include <asm/virt.h>
 
 #include "reboot.h"
 
-typedef void (*phys_reset_t)(unsigned long);
+typedef void (*phys_reset_t)(unsigned long, bool);
 
 /*
  * Function pointers to optional machine specific functions
@@ -51,7 +52,9 @@ static void __soft_restart(void *addr)
 
 	/* Switch to the identity mapping. */
 	phys_reset = (phys_reset_t)virt_to_idmap(cpu_reset);
-	phys_reset((unsigned long)addr);
+
+	/* original stub should be restored by kvm */
+	phys_reset((unsigned long)addr, is_hyp_mode_available());
 
 	/* Should never get here. */
 	BUG();
arch/arm/mm/proc-v7.S
@@ -53,11 +53,15 @@ ENDPROC(cpu_v7_proc_fin)
 	.align	5
 	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_v7_reset)
-	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
-	bic	r1, r1, #0x1			@ ...............m
- THUMB(	bic	r1, r1, #1 << 30 )		@ SCTLR.TE (Thumb exceptions)
-	mcr	p15, 0, r1, c1, c0, 0		@ disable MMU
+	mrc	p15, 0, r2, c1, c0, 0		@ ctrl register
+	bic	r2, r2, #0x1			@ ...............m
+ THUMB(	bic	r2, r2, #1 << 30 )		@ SCTLR.TE (Thumb exceptions)
+	mcr	p15, 0, r2, c1, c0, 0		@ disable MMU
 	isb
+#ifdef CONFIG_ARM_VIRT_EXT
+	teq	r1, #0
+	bne	__hyp_soft_restart
+#endif
 	bx	r0
ENDPROC(cpu_v7_reset)
 	.popsection
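A note on the register shuffle in cpu_v7_reset: under the AAPCS, the two arguments of the new cpu_reset(addr, hvc) arrive in r0 and r1, so r1 now carries the hvc flag and can no longer be used as scratch. The SCTLR read-modify-write therefore moves to r2, and "teq r1, #0" tests the flag before deciding whether to branch to __hyp_soft_restart. The same decision modeled in C, assuming (as the stub code above suggests) that __hyp_soft_restart takes the target address in r0:

#include <stdbool.h>

extern void __hyp_soft_restart(unsigned long addr);	/* HVC re-entry path */

typedef void (*kernel_entry_t)(void);

/* Models the tail of cpu_v7_reset after the MMU is off:
 * r0 = addr, r1 = hvc. */
static void cpu_v7_reset_tail(unsigned long addr, bool hvc)
{
	if (hvc)
		__hyp_soft_restart(addr);	/* bne __hyp_soft_restart; enters target in HYP */
	else
		((kernel_entry_t)addr)();	/* bx r0: plain SVC-mode entry */
}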