Commit b3444d16 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus

* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  lguest: populate initial_page_table
  lguest: restore boot speed
  lguest: fix crash lguest_time_init
parents 4ef5c68f da32dac1
arch/x86/kernel/head_32.S
@@ -620,13 +620,13 @@ ENTRY(initial_code)
 __PAGE_ALIGNED_BSS
 	.align PAGE_SIZE_asm
 #ifdef CONFIG_X86_PAE
-initial_pg_pmd:
+ENTRY(initial_pg_pmd)
 	.fill 1024*KPMDS,4,0
 #else
 ENTRY(initial_page_table)
 	.fill 1024,4,0
 #endif
-initial_pg_fixmap:
+ENTRY(initial_pg_fixmap)
 	.fill 1024,4,0
 ENTRY(empty_zero_page)
 	.fill 4096,1,0
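The only functional change in this hunk is symbol visibility: ENTRY() makes a label global (with the usual code alignment), so the lguest code added to i386_head.S below can reference initial_pg_pmd and initial_pg_fixmap from another object file. For reference, a sketch of the macro as defined in include/linux/linkage.h around this kernel version (ALIGN shown with its x86 expansion; the real definition is spread over several lines):

	#define ALIGN		.align 4,0x90
	#define ENTRY(name)	.globl name; ALIGN; name: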
arch/x86/lguest/boot.c
@@ -531,7 +531,10 @@ static void lguest_write_cr3(unsigned long cr3)
 {
 	lguest_data.pgdir = cr3;
 	lazy_hcall1(LHCALL_NEW_PGTABLE, cr3);
-	cr3_changed = true;
+
+	/* These two page tables are simple, linear, and used during boot */
+	if (cr3 != __pa(swapper_pg_dir) && cr3 != __pa(initial_page_table))
+		cr3_changed = true;
 }

 static unsigned long lguest_read_cr3(void)
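For readers outside arch/x86: __pa() on 32-bit x86 is a constant subtraction off the kernel linear map, so the guard added above costs just two compares against link-time constants. A one-line sketch (the real definition in asm/page.h goes through a couple of wrapper layers):

	#define __pa(x)	((unsigned long)(x) - PAGE_OFFSET)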
@@ -703,9 +706,9 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
  * to forget all of them. Fortunately, this is very rare.
  *
  * ... except in early boot when the kernel sets up the initial pagetables,
- * which makes booting astonishingly slow: 1.83 seconds! So we don't even tell
- * the Host anything changed until we've done the first page table switch,
- * which brings boot back to 0.25 seconds.
+ * which makes booting astonishingly slow: 48 seconds! So we don't even tell
+ * the Host anything changed until we've done the first real page table switch,
+ * which brings boot back to 4.3 seconds.
  */
 static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 {
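The cr3_changed flag set by lguest_write_cr3() in the earlier hunk is consumed right here. For context, a paraphrased sketch of the function body (recalled from arch/x86/lguest/boot.c of this era; treat the details as approximate, it is not part of this diff):

	static void lguest_set_pte(pte_t *ptep, pte_t pteval)
	{
		native_set_pte(ptep, pteval);
		/* Before the first real page table switch, the Host will
		 * rebuild its shadow page tables anyway, so stay quiet. */
		if (cr3_changed)
			lazy_hcall1(LHCALL_FLUSH_TLB, 1);
	}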
@@ -1002,7 +1005,7 @@ static void lguest_time_init(void)
 	clockevents_register_device(&lguest_clockevent);

 	/* Finally, we unblock the timer interrupt. */
-	enable_lguest_irq(0);
+	clear_bit(0, lguest_data.blocked_interrupts);
 }

 /*
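Why this fixes the crash: enable_lguest_irq() is an irq_chip callback, and at lguest_time_init() time the irq_desc state for interrupt 0 has apparently not been set up yet, so going through the irq layer dereferences uninitialized state. Unblocking the timer only requires flipping a bit in the page shared with the Host, which clear_bit() does atomically with no other dependencies. A sketch of the shared structure involved (field as in include/linux/lguest.h; the elided surroundings are abbreviated, not real layout):

	struct lguest_data {
		/* ... other Guest<->Host communication fields elided ... */

		/* Interrupts the Guest does not want delivered right now;
		 * the Host tests this bitmap before injecting one. */
		DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS);
	};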
@@ -1349,9 +1352,6 @@ __init void lguest_init(void)
 	 */
 	switch_to_new_gdt(0);

-	/* We actually boot with all memory mapped, but let's say 128MB. */
-	max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT;
-
 	/*
 	 * The Host<->Guest Switcher lives at the top of our address space, and
 	 * the Host told us how big it is when we made LGUEST_INIT hypercall:

arch/x86/lguest/i386_head.S
@@ -4,6 +4,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/processor-flags.h>
+#include <asm/pgtable.h>

 /*G:020
  * Our story starts with the kernel booting into startup_32 in
@@ -37,9 +38,113 @@ ENTRY(lguest_entry)
 	/* Set up the initial stack so we can run C code. */
 	movl $(init_thread_union+THREAD_SIZE),%esp

+	call init_pagetables
+
 	/* Jumps are relative: we're running __PAGE_OFFSET too low. */
 	jmp lguest_init+__PAGE_OFFSET

+/*
+ * Initialize page tables. This creates a PDE and a set of page
+ * tables, which are located immediately beyond __brk_base. The variable
+ * _brk_end is set up to point to the first "safe" location.
+ * Mappings are created both at virtual address 0 (identity mapping)
+ * and PAGE_OFFSET for up to _end.
+ *
+ * FIXME: This code is taken verbatim from arch/x86/kernel/head_32.S: they
+ * don't have a stack at this point, so we can't just use call and ret.
+ */
+init_pagetables:
+#if PTRS_PER_PMD > 1
+#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
+#else
+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
+#endif
+#define pa(X) ((X) - __PAGE_OFFSET)
+
+/* Enough space to fit pagetables for the low memory linear map */
+MAPPING_BEYOND_END = \
+	PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT
+#ifdef CONFIG_X86_PAE
+
+	/*
+	 * In PAE mode initial_page_table is statically defined to contain
+	 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
+	 * entries). The identity mapping is handled by pointing two PGD entries
+	 * to the first kernel PMD.
+	 *
+	 * Note the upper half of each PMD or PTE are always zero at this stage.
+	 */
+#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
+
+	xorl %ebx,%ebx				/* %ebx is kept at zero */
+
+	movl $pa(__brk_base), %edi
+	movl $pa(initial_pg_pmd), %edx
+	movl $PTE_IDENT_ATTR, %eax
+10:
+	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PMD entry */
+	movl %ecx,(%edx)			/* Store PMD entry */
+						/* Upper half already zero */
+	addl $8,%edx
+	movl $512,%ecx
+11:
+	stosl
+	xchgl %eax,%ebx
+	stosl
+	xchgl %eax,%ebx
+	addl $0x1000,%eax
+	loop 11b
+
+	/*
+	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
+	 */
+	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
+	cmpl %ebp,%eax
+	jb 10b
+1:
+	addl $__PAGE_OFFSET, %edi
+	movl %edi, pa(_brk_end)
+	shrl $12, %eax
+	movl %eax, pa(max_pfn_mapped)
+
+	/* Do early initialization of the fixmap area */
+	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
+#else	/* Not PAE */
+
+page_pde_offset = (__PAGE_OFFSET >> 20);
+
+	movl $pa(__brk_base), %edi
+	movl $pa(initial_page_table), %edx
+	movl $PTE_IDENT_ATTR, %eax
+10:
+	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PDE entry */
+	movl %ecx,(%edx)			/* Store identity PDE entry */
+	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */
+	addl $4,%edx
+	movl $1024, %ecx
+11:
+	stosl
+	addl $0x1000,%eax
+	loop 11b
+
+	/*
+	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
+	 */
+	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
+	cmpl %ebp,%eax
+	jb 10b
+
+	addl $__PAGE_OFFSET, %edi
+	movl %edi, pa(_brk_end)
+	shrl $12, %eax
+	movl %eax, pa(max_pfn_mapped)
+
+	/* Do early initialization of the fixmap area */
+	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
+	movl %eax,pa(initial_page_table+0xffc)
+#endif
+	ret
+
 /*G:055
  * We create a macro which puts the assembler code between lgstart_ and lgend_
  * markers. These templates are put in the .text section: they can't be
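The three constants in init_pagetables above are the easiest things in the routine to get wrong, so here is a small userspace check of the arithmetic. It assumes the default 3G/1G VMSPLIT (__PAGE_OFFSET = 0xC0000000), 4 KiB pages, and the non-PAE PTRS_PER_PGD of 1024; the file name and printed values come from those assumptions, not from this diff:

	/* check_constants.c: verify the boot-time page-table arithmetic.
	 * Build and run with: cc check_constants.c && ./a.out
	 */
	#include <stdio.h>

	#define __PAGE_OFFSET	0xC0000000UL
	#define PAGE_SHIFT	12
	#define PTRS_PER_PGD	1024UL	/* non-PAE: one level, 4 MiB per PDE */

	int main(void)
	{
		/* MAPPING_BEYOND_END: room for page tables mapping all of
		 * lowmem. 1 GiB of lowmem = 262144 pages; each page-table
		 * page maps PTRS_PER_PGD of them in the non-PAE case. */
		unsigned long lowmem_pages =
			(0xFFFFFFFFUL - __PAGE_OFFSET + 1) >> PAGE_SHIFT;
		unsigned long mapping_beyond_end =
			(lowmem_pages / PTRS_PER_PGD) << PAGE_SHIFT;
		printf("MAPPING_BEYOND_END = 0x%lx\n", mapping_beyond_end);
		/* prints 0x100000: 1 MiB of page tables past _end */

		/* KPMDS (PAE): how many top-level slots belong to the kernel.
		 * Each of the 4 PAE PGD entries covers 1 GiB, so a 3G/1G
		 * split leaves exactly one for the kernel. */
		printf("KPMDS = %lu\n", ((-__PAGE_OFFSET) >> 30) & 3);
		/* prints 1 */

		/* page_pde_offset (non-PAE): byte offset of the kernel's
		 * first PDE. A PDE is 4 bytes and maps 4 MiB (1 << 22),
		 * so (addr >> 22) * 4 simplifies to addr >> 20. */
		printf("page_pde_offset = 0x%lx\n", __PAGE_OFFSET >> 20);
		/* prints 0xc00 */
		return 0;
	}

This also shows why deleting the max_pfn_mapped assignment from lguest_init() is safe: the shrl $12 / movl %eax, pa(max_pfn_mapped) at the end of each branch above stores the real number of mapped pages instead of the old hard-coded 128 MB guess.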