Commit 07675f48 authored by Linus Torvalds

Merge branch 'x86-fixes-for-linus' of...

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86-32: Make sure the stack is set up before we use it
  x86, mtrr: Avoid MTRR reprogramming on BP during boot on UP platforms
  x86, nx: Don't force pages RW when setting NX bits
parents 585a7c66 11d4c3f9
...@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid); ...@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid); DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
/* Static state in head.S used to set up a CPU */ /* Static state in head.S used to set up a CPU */
extern struct { extern unsigned long stack_start; /* Initial stack pointer address */
void *sp;
unsigned short ss;
} stack_start;
struct smp_ops { struct smp_ops {
void (*smp_prepare_boot_cpu)(void); void (*smp_prepare_boot_cpu)(void);
......
...@@ -100,7 +100,7 @@ int acpi_save_state_mem(void) ...@@ -100,7 +100,7 @@ int acpi_save_state_mem(void)
#else /* CONFIG_64BIT */ #else /* CONFIG_64BIT */
header->trampoline_segment = setup_trampoline() >> 4; header->trampoline_segment = setup_trampoline() >> 4;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
stack_start.sp = temp_stack + sizeof(temp_stack); stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
early_gdt_descr.address = early_gdt_descr.address =
(unsigned long)get_cpu_gdt_table(smp_processor_id()); (unsigned long)get_cpu_gdt_table(smp_processor_id());
initial_gs = per_cpu_offset(smp_processor_id()); initial_gs = per_cpu_offset(smp_processor_id());
......
...@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void) ...@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
} }
/* /*
* MTRR initialization for all AP's * Delayed MTRR initialization for all AP's
*/ */
void mtrr_aps_init(void) void mtrr_aps_init(void)
{ {
if (!use_intel()) if (!use_intel())
return; return;
/*
* Check if someone has requested the delay of AP MTRR initialization,
* by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
* then we are done.
*/
if (!mtrr_aps_delayed_init)
return;
set_mtrr(~0U, 0, 0, 0); set_mtrr(~0U, 0, 0, 0);
mtrr_aps_delayed_init = false; mtrr_aps_delayed_init = false;
} }
......
...@@ -85,6 +85,8 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE) ...@@ -85,6 +85,8 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
*/ */
__HEAD __HEAD
ENTRY(startup_32) ENTRY(startup_32)
movl pa(stack_start),%ecx
/* test KEEP_SEGMENTS flag to see if the bootloader is asking /* test KEEP_SEGMENTS flag to see if the bootloader is asking
us to not reload segments */ us to not reload segments */
testb $(1<<6), BP_loadflags(%esi) testb $(1<<6), BP_loadflags(%esi)
...@@ -99,7 +101,9 @@ ENTRY(startup_32) ...@@ -99,7 +101,9 @@ ENTRY(startup_32)
movl %eax,%es movl %eax,%es
movl %eax,%fs movl %eax,%fs
movl %eax,%gs movl %eax,%gs
movl %eax,%ss
2: 2:
leal -__PAGE_OFFSET(%ecx),%esp
/* /*
* Clear BSS first so that there are no surprises... * Clear BSS first so that there are no surprises...
...@@ -145,8 +149,6 @@ ENTRY(startup_32) ...@@ -145,8 +149,6 @@ ENTRY(startup_32)
* _brk_end is set up to point to the first "safe" location. * _brk_end is set up to point to the first "safe" location.
* Mappings are created both at virtual address 0 (identity mapping) * Mappings are created both at virtual address 0 (identity mapping)
* and PAGE_OFFSET for up to _end. * and PAGE_OFFSET for up to _end.
*
* Note that the stack is not yet set up!
*/ */
#ifdef CONFIG_X86_PAE #ifdef CONFIG_X86_PAE
...@@ -282,6 +284,9 @@ ENTRY(startup_32_smp) ...@@ -282,6 +284,9 @@ ENTRY(startup_32_smp)
movl %eax,%es movl %eax,%es
movl %eax,%fs movl %eax,%fs
movl %eax,%gs movl %eax,%gs
movl pa(stack_start),%ecx
movl %eax,%ss
leal -__PAGE_OFFSET(%ecx),%esp
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
default_entry: default_entry:
...@@ -347,8 +352,8 @@ default_entry: ...@@ -347,8 +352,8 @@ default_entry:
movl %eax,%cr0 /* ..and set paging (PG) bit */ movl %eax,%cr0 /* ..and set paging (PG) bit */
ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */ ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
1: 1:
/* Set up the stack pointer */ /* Shift the stack pointer to a virtual address */
lss stack_start,%esp addl $__PAGE_OFFSET, %esp
/* /*
* Initialize eflags. Some BIOS's leave bits like NT set. This would * Initialize eflags. Some BIOS's leave bits like NT set. This would
...@@ -360,9 +365,7 @@ default_entry: ...@@ -360,9 +365,7 @@ default_entry:
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
cmpb $0, ready cmpb $0, ready
jz 1f /* Initial CPU cleans BSS */ jnz checkCPUtype
jmp checkCPUtype
1:
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
/* /*
...@@ -470,14 +473,7 @@ is386: movl $2,%ecx # set MP ...@@ -470,14 +473,7 @@ is386: movl $2,%ecx # set MP
cld # gcc2 wants the direction flag cleared at all times cld # gcc2 wants the direction flag cleared at all times
pushl $0 # fake return address for unwinder pushl $0 # fake return address for unwinder
#ifdef CONFIG_SMP
movb ready, %cl
movb $1, ready movb $1, ready
cmpb $0,%cl # the first CPU calls start_kernel
je 1f
movl (stack_start), %esp
1:
#endif /* CONFIG_SMP */
jmp *(initial_code) jmp *(initial_code)
/* /*
...@@ -670,15 +666,15 @@ ENTRY(initial_page_table) ...@@ -670,15 +666,15 @@ ENTRY(initial_page_table)
#endif #endif
.data .data
.balign 4
ENTRY(stack_start) ENTRY(stack_start)
.long init_thread_union+THREAD_SIZE .long init_thread_union+THREAD_SIZE
.long __BOOT_DS
ready: .byte 0
early_recursion_flag: early_recursion_flag:
.long 0 .long 0
ready: .byte 0
int_msg: int_msg:
.asciz "Unknown interrupt or fault at: %p %p %p\n" .asciz "Unknown interrupt or fault at: %p %p %p\n"
......
...@@ -638,7 +638,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) ...@@ -638,7 +638,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
* target processor state. * target processor state.
*/ */
startup_ipi_hook(phys_apicid, (unsigned long) start_secondary, startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
(unsigned long)stack_start.sp); stack_start);
/* /*
* Run STARTUP IPI loop. * Run STARTUP IPI loop.
...@@ -785,7 +785,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu) ...@@ -785,7 +785,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
#endif #endif
early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
initial_code = (unsigned long)start_secondary; initial_code = (unsigned long)start_secondary;
stack_start.sp = (void *) c_idle.idle->thread.sp; stack_start = c_idle.idle->thread.sp;
/* start_ip had better be page-aligned! */ /* start_ip had better be page-aligned! */
start_ip = setup_trampoline(); start_ip = setup_trampoline();
......
...@@ -256,7 +256,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, ...@@ -256,7 +256,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
unsigned long pfn) unsigned long pfn)
{ {
pgprot_t forbidden = __pgprot(0); pgprot_t forbidden = __pgprot(0);
pgprot_t required = __pgprot(0);
/* /*
* The BIOS area between 640k and 1Mb needs to be executable for * The BIOS area between 640k and 1Mb needs to be executable for
...@@ -282,12 +281,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, ...@@ -282,12 +281,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT, if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
__pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
pgprot_val(forbidden) |= _PAGE_RW; pgprot_val(forbidden) |= _PAGE_RW;
/*
* .data and .bss should always be writable.
*/
if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
pgprot_val(required) |= _PAGE_RW;
#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
/* /*
...@@ -327,7 +320,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, ...@@ -327,7 +320,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
#endif #endif
prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
return prot; return prot;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment