Commit d0537508 authored by Eric W. Biederman, committed by Linus Torvalds

[PATCH] kexec: x86_64: add CONFIG_PHYSICAL_START

For one kernel to report a crash that another kernel has created, the two
kernels need to be loaded in memory simultaneously.  To accomplish this,
the two kernels need to be built to run at different physical addresses.

This patch adds the CONFIG_PHYSICAL_START option to the x86_64 kernel
so we can do just that.  You need to know what you are doing and what
the ramifications are before changing this value, and most users won't
care, so I have made it depend on CONFIG_EMBEDDED.

bzImage kernels compiled with this option will work and run at a different
address, but they will still be loaded at 1MB.  If you need the kernel to be
loaded at a different address as well, you need to boot a vmlinux.
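
As an illustration of the mechanism (0x1000000 below is only an example value,
not something this patch sets), the new __PHYSICAL_START/__START_KERNEL
definitions at the end of this diff derive the kernel's virtual link address
from its physical load address:

    __START_KERNEL = __START_KERNEL_map + __PHYSICAL_START
                   = 0xffffffff80000000 + 0x100000   = 0xffffffff80100000  (default, same as today)
                   = 0xffffffff80000000 + 0x1000000  = 0xffffffff81000000  (e.g. a crash-capture kernel)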
Signed-off-by: Eric Biederman <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 8a919085
@@ -369,6 +369,17 @@ config X86_MCE_INTEL
 	  Additional support for intel specific MCE features such as
 	  the thermal monitor.
 
+config PHYSICAL_START
+	hex "Physical address where the kernel is loaded" if EMBEDDED
+	default "0x100000"
+	help
+	  This gives the physical address where the kernel is loaded.
+
+	  Primarily used in the case of kexec on panic where the
+	  fail safe kernel needs to run at a different address than
+	  the panic-ed kernel.
+
+	  Don't change this unless you know what you are doing.
 config SECCOMP
 	bool "Enable seccomp to safely compute untrusted bytecode"
 	depends on PROC_FS
...
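Because the prompt above is guarded by "if EMBEDDED", the value can only be
changed when CONFIG_EMBEDDED is enabled; otherwise the 0x100000 default is
used.  As a sketch only (0x1000000 is an illustrative value, not something
this patch sets), a crash-capture kernel's .config could then contain:

    CONFIG_EMBEDDED=y
    CONFIG_PHYSICAL_START=0x1000000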
@@ -28,6 +28,7 @@
 #include <linux/linkage.h>
 #include <asm/segment.h>
+#include <asm/page.h>
 	.code32
 	.globl startup_32
@@ -77,7 +78,7 @@ startup_32:
 	jnz  3f
 	addl $8,%esp
 	xorl %ebx,%ebx
-	ljmp $(__KERNEL_CS), $0x100000
+	ljmp $(__KERNEL_CS), $__PHYSICAL_START
 /*
  * We come here, if we were loaded high.
@@ -103,7 +104,7 @@ startup_32:
 	popl %ecx	# lcount
 	popl %edx	# high_buffer_start
 	popl %eax	# hcount
-	movl $0x100000,%edi
+	movl $__PHYSICAL_START,%edi
 	cli		# make sure we don't get interrupted
 	ljmp $(__KERNEL_CS), $0x1000 # and jump to the move routine
@@ -128,7 +129,7 @@ move_routine_start:
 	movsl
 	movl %ebx,%esi	# Restore setup pointer
 	xorl %ebx,%ebx
-	ljmp $(__KERNEL_CS), $0x100000
+	ljmp $(__KERNEL_CS), $__PHYSICAL_START
 move_routine_end:
...
@@ -11,6 +11,7 @@
 #include "miscsetup.h"
 #include <asm/io.h>
+#include <asm/page.h>
 /*
  * gzip declarations
@@ -284,7 +285,7 @@ void setup_normal_output_buffer(void)
 #else
 	if ((ALT_MEM_K > EXT_MEM_K ? ALT_MEM_K : EXT_MEM_K) < 1024) error("Less than 2MB of memory");
 #endif
-	output_data = (char *)0x100000; /* Points to 1M */
+	output_data = (char *)__PHYSICAL_START; /* Normally Points to 1M */
 	free_mem_end_ptr = (long)real_mode;
 }
@@ -307,8 +308,8 @@ void setup_output_buffer_if_we_run_high(struct moveparams *mv)
 	low_buffer_size = low_buffer_end - LOW_BUFFER_START;
 	high_loaded = 1;
 	free_mem_end_ptr = (long)high_buffer_start;
-	if ( (0x100000 + low_buffer_size) > ((ulg)high_buffer_start)) {
-		high_buffer_start = (uch *)(0x100000 + low_buffer_size);
+	if ( (__PHYSICAL_START + low_buffer_size) > ((ulg)high_buffer_start)) {
+		high_buffer_start = (uch *)(__PHYSICAL_START + low_buffer_size);
 		mv->hcount = 0; /* say: we need not to move high_buffer */
 	}
 	else mv->hcount = -1;
...
@@ -248,23 +248,23 @@ ENTRY(_stext)
 	 */
 	.org 0x1000
 ENTRY(init_level4_pgt)
-	.quad	0x0000000000102007		/* -> level3_ident_pgt */
+	.quad	0x0000000000002007 + __PHYSICAL_START	/* -> level3_ident_pgt */
 	.fill	255,8,0
-	.quad	0x000000000010a007
+	.quad	0x000000000000a007 + __PHYSICAL_START
 	.fill	254,8,0
 	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
-	.quad	0x0000000000103007		/* -> level3_kernel_pgt */
+	.quad	0x0000000000003007 + __PHYSICAL_START	/* -> level3_kernel_pgt */
 	.org 0x2000
 ENTRY(level3_ident_pgt)
-	.quad	0x0000000000104007
+	.quad	0x0000000000004007 + __PHYSICAL_START
 	.fill	511,8,0
 	.org 0x3000
 ENTRY(level3_kernel_pgt)
 	.fill	510,8,0
 	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
-	.quad	0x0000000000105007		/* -> level2_kernel_pgt */
+	.quad	0x0000000000005007 + __PHYSICAL_START	/* -> level2_kernel_pgt */
 	.fill	1,8,0
 	.org 0x4000
@@ -337,17 +337,17 @@ ENTRY(empty_bad_pmd_table)
 	.org 0xa000
 ENTRY(level3_physmem_pgt)
-	.quad	0x0000000000105007		/* -> level2_kernel_pgt (so that __va works even before pagetable_init) */
+	.quad	0x0000000000005007 + __PHYSICAL_START	/* -> level2_kernel_pgt (so that __va works even before pagetable_init) */
 	.org 0xb000
 #ifdef CONFIG_ACPI_SLEEP
 ENTRY(wakeup_level4_pgt)
-	.quad	0x0000000000102007		/* -> level3_ident_pgt */
+	.quad	0x0000000000002007 + __PHYSICAL_START	/* -> level3_ident_pgt */
 	.fill	255,8,0
-	.quad	0x000000000010a007
+	.quad	0x000000000000a007 + __PHYSICAL_START
 	.fill	254,8,0
 	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
-	.quad	0x0000000000103007		/* -> level3_kernel_pgt */
+	.quad	0x0000000000003007 + __PHYSICAL_START	/* -> level3_kernel_pgt */
 #endif
 	.data
...
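With the default CONFIG_PHYSICAL_START of 0x100000 the entries above are
numerically identical to the old hardcoded values (for example
0x2007 + 0x100000 = 0x102007): the low 0x007 bits are the present/writable/user
flags, and the remaining bits are the physical address of the next-level page
table, which now follows wherever the kernel is loaded.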
@@ -64,12 +64,14 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #define __pgd(x) ((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
-#define __START_KERNEL		0xffffffff80100000UL
+#define __PHYSICAL_START	((unsigned long)CONFIG_PHYSICAL_START)
+#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
 #define __START_KERNEL_map	0xffffffff80000000UL
 #define __PAGE_OFFSET		0xffff810000000000UL
 #else
-#define __START_KERNEL		0xffffffff80100000
+#define __PHYSICAL_START	CONFIG_PHYSICAL_START
+#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
 #define __START_KERNEL_map	0xffffffff80000000
 #define __PAGE_OFFSET		0xffff810000000000
 #endif /* !__ASSEMBLY__ */
...