Commit 5b11f1ce authored by H. Peter Anvin

x86, boot: straighten out ranges to copy/zero in compressed/head*.S

Both on 32 and 64 bits, we copy all the way up to the end of bss,
except that on 64 bits there is a hack to avoid copying on top of the
page tables.  There is no point in copying bss at all, especially
since we are just about to zero it all anyway.

To clean up and unify the handling, we now do:

  - copy from startup_32 to _bss.
  - zero from _bss to _ebss.
  - the _ebss symbol is aligned to an 8-byte boundary.
  - the page tables are moved to a separate section.

Use _bss as the copy endpoint since _edata may be misaligned.

[ Impact: cleanup, trivial performance improvement ]
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent b40d68d5
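
For reference, here is a minimal C sketch of the copy/zero ranges the patch establishes. It is illustrative only: the linker symbols are the ones named in the message above, but the function name, the dest parameter, and the use of memmove/memset are assumptions for the sketch, not kernel code.

/* Illustrative sketch only -- not kernel code. */
#include <string.h>

extern char startup_32[], _bss[], _ebss[];	/* linker-provided symbols */

static void relocate_and_clear(char *dest)	/* 'dest' = relocation buffer (hypothetical) */
{
	/*
	 * Copy only the loaded image, [startup_32, _bss); memmove because
	 * the source and destination ranges may overlap.
	 */
	memmove(dest, startup_32, _bss - startup_32);

	/* Zero all of BSS, [_bss, _ebss); _ebss is aligned to 8 bytes. */
	memset(dest + (_bss - startup_32), 0, _ebss - _bss);
}

Since BSS is no longer copied, there is slightly less data to move; that is the "trivial performance improvement" the impact line refers to.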
@@ -93,9 +93,9 @@ ENTRY(startup_32)
 	 * where decompression in place becomes safe.
 	 */
 	pushl	%esi
-	leal	_ebss(%ebp), %esi
-	leal	_ebss(%ebx), %edi
-	movl	$(_ebss - startup_32), %ecx
+	leal	_bss(%ebp), %esi
+	leal	_bss(%ebx), %edi
+	movl	$(_bss - startup_32), %ecx
 	std
 	rep	movsb
 	cld
@@ -125,7 +125,7 @@ relocated:
 	 * Clear BSS
 	 */
 	xorl	%eax, %eax
-	leal	_edata(%ebx), %edi
+	leal	_bss(%ebx), %edi
 	leal	_ebss(%ebx), %ecx
 	subl	%edi, %ecx
 	cld
@@ -253,9 +253,9 @@ ENTRY(startup_64)
 	 * Copy the compressed kernel to the end of our buffer
 	 * where decompression in place becomes safe.
 	 */
-	leaq	_end_before_pgt(%rip), %r8
-	leaq	_end_before_pgt(%rbx), %r9
-	movq	$_end_before_pgt /* - $startup_32 */, %rcx
+	leaq	_bss(%rip), %r8
+	leaq	_bss(%rbx), %r9
+	movq	$_bss /* - $startup_32 */, %rcx
1:	subq	$8, %r8
 	subq	$8, %r9
 	movq	0(%r8), %rax
@@ -276,8 +276,8 @@ relocated:
 	 * Clear BSS
 	 */
 	xorq	%rax, %rax
-	leaq	_edata(%rbx), %rdi
-	leaq	_end_before_pgt(%rbx), %rcx
+	leaq	_bss(%rbx), %rdi
+	leaq	_ebss(%rbx), %rcx
 	subq	%rdi, %rcx
 	cld
 	rep	stosb
@@ -329,3 +329,11 @@ boot_heap:
 boot_stack:
 	.fill BOOT_STACK_SIZE, 1, 0
 boot_stack_end:
+
+/*
+ * Space for page tables (not in .bss so not zeroed)
+ */
+	.section ".pgtable","a",@nobits
+	.balign 4096
+pgtable:
+	.fill 6*4096, 1, 0
@@ -2,6 +2,8 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
 
 #undef i386
 
+#include <asm/page_types.h>
+
 #ifdef CONFIG_X86_64
 OUTPUT_ARCH(i386:x86-64)
 ENTRY(startup_64)
@@ -48,13 +50,16 @@ SECTIONS
 		*(.bss)
 		*(.bss.*)
 		*(COMMON)
-#ifdef CONFIG_X86_64
-		. = ALIGN(8);
-		_end_before_pgt = . ;
-		. = ALIGN(4096);
-		pgtable = . ;
-		. = . + 4096 * 6;
-#endif
+		. = ALIGN(8);	/* For convenience during zeroing */
 		_ebss = .;
 	}
+#ifdef CONFIG_X86_64
+	. = ALIGN(PAGE_SIZE);
+	.pgtable : {
+		_pgtable = . ;
+		*(.pgtable)
+		_epgtable = . ;
+	}
+#endif
+	_end = .;
 }