Commit 1f43c539 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86-fixes

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86-fixes:
  x86: fix PCI MSI breaks when booting with nosmp
  x86: vget_cycles() __always_inline
  x86: add more boot protocol documentation
  bootprotocol: cleanup
  x86: fix warning in "x86: clean up vSMP detection"
  x86: !x & y typo in mtrr code
parents c135b659 e90955c2
@@ -40,9 +40,17 @@ Protocol 2.05:	(Kernel 2.6.20) Make protected mode kernel relocatable.
 		Introduce relocatable_kernel and kernel_alignment fields.
 
 Protocol 2.06:	(Kernel 2.6.22) Added a field that contains the size of
-		the boot command line
+		the boot command line.
+
+Protocol 2.07:	(Kernel 2.6.24) Added paravirtualised boot protocol.
+		Introduced hardware_subarch and hardware_subarch_data
+		and KEEP_SEGMENTS flag in load_flags.
+
+Protocol 2.08:	(Kernel 2.6.26) Added crc32 checksum and ELF format
+		payload. Introduced payload_offset and payload length
+		fields to aid in locating the payload.
 
-Protocol 2.09:	(kernel 2.6.26) Added a field of 64-bit physical
+Protocol 2.09:	(Kernel 2.6.26) Added a field of 64-bit physical
 		pointer to single linked list of struct setup_data.
 
 **** MEMORY LAYOUT
...@@ -90,7 +90,7 @@ u8 mtrr_type_lookup(u64 start, u64 end) ...@@ -90,7 +90,7 @@ u8 mtrr_type_lookup(u64 start, u64 end)
* Look of multiple ranges matching this address and pick type * Look of multiple ranges matching this address and pick type
* as per MTRR precedence * as per MTRR precedence
*/ */
if (!mtrr_state.enabled & 2) { if (!(mtrr_state.enabled & 2)) {
return mtrr_state.def_type; return mtrr_state.def_type;
} }
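A note on the mtrr hunk above: the bug is pure C operator precedence. Logical not binds tighter than bitwise and, so !mtrr_state.enabled & 2 parses as (!mtrr_state.enabled) & 2, not as a test of bit 1. A minimal standalone sketch (userspace, hypothetical values, not kernel code):

/* Illustration of the (!x & y) precedence pitfall fixed above.
 * Hypothetical struct and values; not kernel code. */
#include <stdio.h>

struct state { unsigned int enabled; };

int main(void)
{
	/* bit 1 clear: pretend the MTRR enable bit is not set */
	struct state mtrr_state = { .enabled = 1 };

	/* Buggy form parses as (!mtrr_state.enabled) & 2, i.e. 0 & 2 == 0,
	 * so it is false here even though bit 1 is clear. */
	if (!mtrr_state.enabled & 2)
		printf("buggy check:  would return def_type\n");
	else
		printf("buggy check:  falls through\n");

	/* Fixed form tests bit 1 first, then negates: !(1 & 2) == 1. */
	if (!(mtrr_state.enabled & 2))
		printf("fixed check:  would return def_type\n");
	else
		printf("fixed check:  falls through\n");

	return 0;
}

With enabled = 1 the two forms disagree: the buggy one evaluates to 0 and falls through, while the parenthesised form is true, which is the behaviour the fix restores.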
@@ -100,7 +100,7 @@ void __init free_early(unsigned long start, unsigned long end)
 	for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
 		;
-	memcpy(&early_res[i], &early_res[i + 1],
+	memmove(&early_res[i], &early_res[i + 1],
 	       (j - 1 - i) * sizeof(struct early_res));
 	early_res[j - 1].end = 0;
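On the memcpy to memmove change above: the source (&early_res[i + 1]) and destination (&early_res[i]) overlap, and memcpy has undefined behaviour for overlapping regions while memmove is specified to handle them. A small userspace sketch of the same compact-the-array-in-place pattern, with a hypothetical struct res standing in for struct early_res:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for struct early_res: a start/end pair,
 * where end == 0 terminates the table. */
struct res { unsigned long start, end; };

#define MAX_RES 4

static struct res res_tab[MAX_RES] = {
	{ 0x1000, 0x2000 },
	{ 0x3000, 0x4000 },
	{ 0x5000, 0x6000 },
	{ 0, 0 },
};

/* Drop entry i by sliding the tail of the table down one slot,
 * mirroring the loop shown in the hunk above. */
static void drop_entry(int i)
{
	int j;

	for (j = i + 1; j < MAX_RES && res_tab[j].end; j++)
		;
	/* Destination and source overlap (dst is one entry below src),
	 * so memmove is required; memcpy would be undefined behaviour. */
	memmove(&res_tab[i], &res_tab[i + 1],
		(j - 1 - i) * sizeof(struct res));
	res_tab[j - 1].end = 0;
}

int main(void)
{
	drop_entry(0);
	for (int i = 0; i < MAX_RES; i++)
		printf("%d: %#lx-%#lx\n", i, res_tab[i].start, res_tab[i].end);
	return 0;
}

Running it removes entry 0 and slides the remaining entries down, terminating the table with end == 0.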
@@ -1149,14 +1149,10 @@ static int __init smp_sanity_check(unsigned max_cpus)
 		       "forcing use of dummy APIC emulation.\n");
 		smpboot_clear_io_apic();
 #ifdef CONFIG_X86_32
-		if (nmi_watchdog == NMI_LOCAL_APIC) {
-			printk(KERN_INFO "activating minimal APIC for"
-					"NMI watchdog use.\n");
-			connect_bsp_APIC();
-			setup_local_APIC();
-			end_local_APIC_setup();
-		}
+		connect_bsp_APIC();
 #endif
+		setup_local_APIC();
+		end_local_APIC_setup();
 		return -1;
 	}
...@@ -133,7 +133,7 @@ int is_vsmp_box(void) ...@@ -133,7 +133,7 @@ int is_vsmp_box(void)
} }
} }
#else #else
static int __init detect_vsmp_box(void) static void __init detect_vsmp_box(void)
{ {
} }
int is_vsmp_box(void) int is_vsmp_box(void)
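On the vsmp stub change above: a stub declared to return int but with an empty body falls off the end without a return value, which is the sort of thing gcc's -Wreturn-type (enabled by -Wall) complains about; making the stub void, like its real counterpart, avoids that. A tiny illustration, with made-up names (not the kernel's):

/* Compile with: gcc -Wall warn.c   (file name is arbitrary) */

static int stub_returning_int(void)
{
}	/* -Wall (via -Wreturn-type) warns that control reaches
	 * the end of a non-void function */

static void stub_returning_void(void)
{
}	/* fine: a void stub has nothing to return */

int main(void)
{
	stub_returning_int();	/* return value is never used */
	stub_returning_void();
	return 0;
}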
@@ -32,7 +32,7 @@ static inline cycles_t get_cycles(void)
 	return ret;
 }
 
-static inline cycles_t vget_cycles(void)
+static __always_inline cycles_t vget_cycles(void)
 {
 	/*
 	 * We only do VDSOs on TSC capable CPUs, so this shouldnt
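On the vget_cycles() change above: plain inline is only a hint, so under some configurations gcc may still emit the function out of line; the usual rationale for forcing inlining here is that code running from the vsyscall/vDSO path cannot safely call into ordinary kernel text. A standalone userspace sketch of the attribute involved (the kernel's __always_inline macro expands, roughly, to inline plus this attribute):

/* x86-only sketch: uses the rdtsc instruction, like vget_cycles() does.
 * Userspace illustration, not kernel code. */
#include <stdint.h>
#include <stdio.h>

/* Plain inline: only a hint, the compiler may still emit a call. */
static inline uint64_t rdtsc_hint(void)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

/* always_inline: the body is pasted at every call site, so no
 * out-of-line copy needs to be reachable from the caller. */
static inline __attribute__((always_inline)) uint64_t rdtsc_forced(void)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("hint:   %llu\n", (unsigned long long)rdtsc_hint());
	printf("forced: %llu\n", (unsigned long long)rdtsc_forced());
	return 0;
}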