Commit 5ea8d375 authored by Linus Torvalds

Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, apic: Don't use logical-flat mode when CPU hotplug may exceed 8 CPUs
  x86-32: Make AT_VECTOR_SIZE_ARCH=2
  x86/agp: Fix amd64-agp module initialization regression
  x86, doc: Fix minor spelling error in arch/x86/mm/gup.c
parents f2d6cff7 681ee44d
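
The headline change in this batch, "x86, apic: Don't use logical-flat mode when CPU hotplug may exceed 8 CPUs", keys the logical-flat versus physical-flat routing decision off num_possible_cpus() instead of the number of processors enumerated so far: logical-flat mode addresses CPUs through an 8-bit logical destination bitmap, one bit per CPU, so it can never cover more than 8 CPUs, including ones that are only hot-added later. The standalone sketch below is only a minimal illustration of that decision rule, not kernel code; the names choose_routing and apic_mode are invented for the example.

/*
 * Illustration only (not kernel code): pick an APIC routing mode based on
 * the number of *possible* CPUs rather than the number present at boot.
 * choose_routing and apic_mode are invented names for this sketch.
 */
#include <stdio.h>

enum apic_mode { APIC_FLAT, APIC_PHYSFLAT };

/* Logical-flat mode has one bit per CPU in an 8-bit destination bitmap. */
static enum apic_mode choose_routing(unsigned int possible_cpus)
{
        return possible_cpus > 8 ? APIC_PHYSFLAT : APIC_FLAT;
}

int main(void)
{
        unsigned int present_at_boot = 4;   /* CPUs enumerated at boot */
        unsigned int possible = 16;         /* includes hot-pluggable CPUs */

        /* Deciding by the boot-time count would wrongly keep flat mode. */
        printf("by present CPUs:  %s\n",
               choose_routing(present_at_boot) == APIC_FLAT ? "flat" : "physflat");
        printf("by possible CPUs: %s\n",
               choose_routing(possible) == APIC_FLAT ? "flat" : "physflat");
        return 0;
}
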
@@ -11,9 +11,9 @@
 #include <linux/irqflags.h>
 /* entries in ARCH_DLINFO: */
-#ifdef CONFIG_IA32_EMULATION
+#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
 # define AT_VECTOR_SIZE_ARCH 2
-#else
+#else /* else it's non-compat x86-64 */
 # define AT_VECTOR_SIZE_ARCH 1
 #endif
......
@@ -1185,9 +1185,6 @@ static void __init acpi_process_madt(void)
                if (!error) {
                        acpi_lapic = 1;
-#ifdef CONFIG_X86_BIGSMP
-                       generic_bigsmp_probe();
-#endif
                        /*
                         * Parse MADT IO-APIC entries
                         */
@@ -1197,8 +1194,6 @@ static void __init acpi_process_madt(void)
                                acpi_ioapic = 1;
                                smp_found_config = 1;
-                               if (apic->setup_apic_routing)
-                                       apic->setup_apic_routing();
                        }
                }
                if (error == -EINVAL) {
......
@@ -1641,9 +1641,7 @@ int __init APIC_init_uniprocessor(void)
 #endif
        enable_IR_x2apic();
-#ifdef CONFIG_X86_64
        default_setup_apic_routing();
-#endif
        verify_local_APIC();
        connect_bsp_APIC();
@@ -1891,21 +1889,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
        if (apicid > max_physical_apicid)
                max_physical_apicid = apicid;
-#ifdef CONFIG_X86_32
-       if (num_processors > 8) {
-               switch (boot_cpu_data.x86_vendor) {
-               case X86_VENDOR_INTEL:
-                       if (!APIC_XAPIC(version)) {
-                               def_to_bigsmp = 0;
-                               break;
-                       }
-                       /* If P4 and above fall through */
-               case X86_VENDOR_AMD:
-                       def_to_bigsmp = 1;
-               }
-       }
-#endif
 #if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
        early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
        early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
......
@@ -52,7 +52,32 @@ static int __init print_ipi_mode(void)
 }
 late_initcall(print_ipi_mode);
-void default_setup_apic_routing(void)
+void __init default_setup_apic_routing(void)
+{
+       int version = apic_version[boot_cpu_physical_apicid];
+
+       if (num_possible_cpus() > 8) {
+               switch (boot_cpu_data.x86_vendor) {
+               case X86_VENDOR_INTEL:
+                       if (!APIC_XAPIC(version)) {
+                               def_to_bigsmp = 0;
+                               break;
+                       }
+                       /* If P4 and above fall through */
+               case X86_VENDOR_AMD:
+                       def_to_bigsmp = 1;
+               }
+       }
+
+#ifdef CONFIG_X86_BIGSMP
+       generic_bigsmp_probe();
+#endif
+
+       if (apic->setup_apic_routing)
+               apic->setup_apic_routing();
+}
+
+static void setup_apic_flat_routing(void)
 {
 #ifdef CONFIG_X86_IO_APIC
        printk(KERN_INFO
@@ -103,7 +128,7 @@ struct apic apic_default = {
        .init_apic_ldr                  = default_init_apic_ldr,
        .ioapic_phys_id_map             = default_ioapic_phys_id_map,
-       .setup_apic_routing             = default_setup_apic_routing,
+       .setup_apic_routing             = setup_apic_flat_routing,
        .multi_timer_check              = NULL,
        .apicid_to_node                 = default_apicid_to_node,
        .cpu_to_logical_apicid          = default_cpu_to_logical_apicid,
......
@@ -67,7 +67,7 @@ void __init default_setup_apic_routing(void)
        }
 #endif
-       if (apic == &apic_flat && num_processors > 8)
+       if (apic == &apic_flat && num_possible_cpus() > 8)
                apic = &apic_physflat;
        printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
......
@@ -359,13 +359,6 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
                x86_init.mpparse.mpc_record(1);
        }
-#ifdef CONFIG_X86_BIGSMP
-       generic_bigsmp_probe();
-#endif
-
-       if (apic->setup_apic_routing)
-               apic->setup_apic_routing();
-
        if (!num_processors)
                printk(KERN_ERR "MPTABLE: no processors registered!\n");
        return num_processors;
......
@@ -1083,9 +1083,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        set_cpu_sibling_map(0);
        enable_IR_x2apic();
-#ifdef CONFIG_X86_64
        default_setup_apic_routing();
-#endif
        if (smp_sanity_check(max_cpus) < 0) {
                printk(KERN_INFO "SMP disabled\n");
......
@@ -18,7 +18,7 @@ static inline pte_t gup_get_pte(pte_t *ptep)
 #else
 /*
  * With get_user_pages_fast, we walk down the pagetables without taking
- * any locks. For this we would like to load the pointers atoimcally,
+ * any locks. For this we would like to load the pointers atomically,
  * but that is not possible (without expensive cmpxchg8b) on PAE. What
  * we do have is the guarantee that a pte will only either go from not
  * present to present, or present to not present or both -- it will not
......
@@ -767,16 +767,19 @@ int __init agp_amd64_init(void)
 static int __init agp_amd64_mod_init(void)
 {
+#ifndef MODULE
        if (gart_iommu_aperture)
                return agp_bridges_found ? 0 : -ENODEV;
+#endif
        return agp_amd64_init();
 }
 static void __exit agp_amd64_cleanup(void)
 {
+#ifndef MODULE
        if (gart_iommu_aperture)
                return;
+#endif
        if (aperture_resource)
                release_resource(aperture_resource);
        pci_unregister_driver(&agp_amd64_pci_driver);
......