Commit 6c81f168 authored by Linus Torvalds

Merge penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/numaq

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents 49afbe6c 53b45352
...@@ -456,11 +456,6 @@ config NR_CPUS ...@@ -456,11 +456,6 @@ config NR_CPUS
This is purely to save memory - each supported CPU adds This is purely to save memory - each supported CPU adds
approximately eight kilobytes to the kernel image. approximately eight kilobytes to the kernel image.
config CLUSTERED_APIC
bool
depends on X86_NUMAQ || X86_SUMMIT
default y
# Common NUMA Features # Common NUMA Features
config NUMA config NUMA
bool "Numa Memory Allocation Support" bool "Numa Memory Allocation Support"
......
...@@ -23,7 +23,6 @@ ...@@ -23,7 +23,6 @@
#include <asm/mtrr.h> #include <asm/mtrr.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/smpboot.h>
#include <mach_ipi.h> #include <mach_ipi.h>
/* /*
......
...@@ -47,12 +47,12 @@ ...@@ -47,12 +47,12 @@
#include <linux/mc146818rtc.h> #include <linux/mc146818rtc.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/smpboot.h>
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/arch_hooks.h> #include <asm/arch_hooks.h>
#include "smpboot_hooks.h" #include "smpboot_hooks.h"
#include <mach_apic.h> #include <mach_apic.h>
#include <mach_wakecpu.h>
/* Set if we find a B stepping CPU */ /* Set if we find a B stepping CPU */
static int __initdata smp_b_stepping; static int __initdata smp_b_stepping;
...@@ -348,8 +348,7 @@ void __init smp_callin(void) ...@@ -348,8 +348,7 @@ void __init smp_callin(void)
* our local APIC. We have to wait for the IPI or we'll * our local APIC. We have to wait for the IPI or we'll
* lock up on an APIC access. * lock up on an APIC access.
*/ */
if (!clustered_apic_mode) wait_for_init_deassert(&init_deasserted);
while (!atomic_read(&init_deasserted));
/* /*
* (This works even if the APIC is not enabled.) * (This works even if the APIC is not enabled.)
...@@ -398,13 +397,9 @@ void __init smp_callin(void) ...@@ -398,13 +397,9 @@ void __init smp_callin(void)
*/ */
Dprintk("CALLIN, before setup_local_APIC().\n"); Dprintk("CALLIN, before setup_local_APIC().\n");
/* smp_callin_clear_local_apic();
* Because we use NMIs rather than the INIT-STARTUP sequence to
* bootstrap the CPUs, the APIC may be in a weird state. Kick it.
*/
if (clustered_apic_mode)
clear_local_APIC();
setup_local_APIC(); setup_local_APIC();
map_cpu_to_logical_apicid();
local_irq_enable(); local_irq_enable();
...@@ -503,63 +498,58 @@ static struct task_struct * __init fork_by_hand(void) ...@@ -503,63 +498,58 @@ static struct task_struct * __init fork_by_hand(void)
return do_fork(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL); return do_fork(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
} }
/* which physical APIC ID maps to which logical CPU number */ #ifdef CONFIG_NUMA
volatile int physical_apicid_2_cpu[MAX_APICID];
/* which logical CPU number maps to which physical APIC ID */
volatile int cpu_2_physical_apicid[NR_CPUS];
/* which logical APIC ID maps to which logical CPU number */ /* which logical CPUs are on which nodes */
volatile int logical_apicid_2_cpu[MAX_APICID]; volatile unsigned long node_2_cpu_mask[MAX_NR_NODES] =
/* which logical CPU number maps to which logical APIC ID */ { [0 ... MAX_NR_NODES-1] = 0 };
volatile int cpu_2_logical_apicid[NR_CPUS]; /* which node each logical CPU is on */
volatile int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
static inline void init_cpu_to_apicid(void) /* set up a mapping between cpu and node. */
/* Initialize all maps between cpu number and apicids */ static inline void map_cpu_to_node(int cpu, int node)
{ {
int apicid, cpu; printk("Mapping cpu %d to node %d\n", cpu, node);
node_2_cpu_mask[node] |= (1 << cpu);
cpu_2_node[cpu] = node;
}
for (apicid = 0; apicid < MAX_APICID; apicid++) { /* undo a mapping between cpu and node. */
physical_apicid_2_cpu[apicid] = -1; static inline void unmap_cpu_to_node(int cpu)
logical_apicid_2_cpu[apicid] = -1; {
} int node;
for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpu_2_physical_apicid[cpu] = -1; printk("Unmapping cpu %d from all nodes\n", cpu);
cpu_2_logical_apicid[cpu] = -1; for (node = 0; node < MAX_NR_NODES; node ++)
} node_2_cpu_mask[node] &= ~(1 << cpu);
cpu_2_node[cpu] = -1;
} }
#else /* !CONFIG_NUMA */
static inline void map_cpu_to_boot_apicid(int cpu, int apicid) #define map_cpu_to_node(cpu, node) ({})
/* #define unmap_cpu_to_node(cpu) ({})
* set up a mapping between cpu and apicid. Uses logical apicids for multiquad,
* else physical apic ids #endif /* CONFIG_NUMA */
*/
volatile u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
void map_cpu_to_logical_apicid(void)
{ {
if (clustered_apic_mode) { int cpu = smp_processor_id();
logical_apicid_2_cpu[apicid] = cpu; int apicid = logical_smp_processor_id();
cpu_2_logical_apicid[cpu] = apicid;
} else { cpu_2_logical_apicid[cpu] = apicid;
physical_apicid_2_cpu[apicid] = cpu; map_cpu_to_node(cpu, apicid_to_node(apicid));
cpu_2_physical_apicid[cpu] = apicid;
}
} }
static inline void unmap_cpu_to_boot_apicid(int cpu, int apicid) void unmap_cpu_to_logical_apicid(int cpu)
/*
* undo a mapping between cpu and apicid. Uses logical apicids for multiquad,
* else physical apic ids
*/
{ {
if (clustered_apic_mode) { cpu_2_logical_apicid[cpu] = BAD_APICID;
logical_apicid_2_cpu[apicid] = -1; unmap_cpu_to_node(cpu);
cpu_2_logical_apicid[cpu] = -1;
} else {
physical_apicid_2_cpu[apicid] = -1;
cpu_2_physical_apicid[cpu] = -1;
}
} }
#if APIC_DEBUG #if APIC_DEBUG
static inline void inquire_remote_apic(int apicid) static inline void __inquire_remote_apic(int apicid)
{ {
int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 }; int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
char *names[] = { "ID", "VERSION", "SPIV" }; char *names[] = { "ID", "VERSION", "SPIV" };
...@@ -654,6 +644,15 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) ...@@ -654,6 +644,15 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
unsigned long send_status = 0, accept_status = 0; unsigned long send_status = 0, accept_status = 0;
int maxlvt, timeout, num_starts, j; int maxlvt, timeout, num_starts, j;
/*
* Be paranoid about clearing APIC errors.
*/
if (APIC_INTEGRATED(apic_version[phys_apicid])) {
apic_read_around(APIC_SPIV);
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
}
Dprintk("Asserting INIT.\n"); Dprintk("Asserting INIT.\n");
/* /*
...@@ -775,17 +774,18 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) ...@@ -775,17 +774,18 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
extern unsigned long cpu_initialized; extern unsigned long cpu_initialized;
static void __init do_boot_cpu (int apicid) static int __init do_boot_cpu(int apicid)
/* /*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
* (ie clustered apic addressing mode), this is a LOGICAL apic ID. * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
* Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
*/ */
{ {
struct task_struct *idle; struct task_struct *idle;
unsigned long boot_error = 0; unsigned long boot_error;
int timeout, cpu; int timeout, cpu;
unsigned long start_eip; unsigned long start_eip;
unsigned short nmi_high, nmi_low; unsigned short nmi_high = 0, nmi_low = 0;
cpu = ++cpucount; cpu = ++cpucount;
/* /*
...@@ -802,8 +802,6 @@ static void __init do_boot_cpu (int apicid) ...@@ -802,8 +802,6 @@ static void __init do_boot_cpu (int apicid)
*/ */
init_idle(idle, cpu); init_idle(idle, cpu);
map_cpu_to_boot_apicid(cpu, apicid);
idle->thread.eip = (unsigned long) start_secondary; idle->thread.eip = (unsigned long) start_secondary;
unhash_process(idle); unhash_process(idle);
...@@ -825,11 +823,7 @@ static void __init do_boot_cpu (int apicid) ...@@ -825,11 +823,7 @@ static void __init do_boot_cpu (int apicid)
Dprintk("Setting warm reset code and vector.\n"); Dprintk("Setting warm reset code and vector.\n");
if (clustered_apic_mode) { store_NMI_vector(&nmi_high, &nmi_low);
/* stash the current NMI vector, so we can put things back */
nmi_high = *((volatile unsigned short *) TRAMPOLINE_HIGH);
nmi_low = *((volatile unsigned short *) TRAMPOLINE_LOW);
}
CMOS_WRITE(0xa, 0xf); CMOS_WRITE(0xa, 0xf);
local_flush_tlb(); local_flush_tlb();
...@@ -839,24 +833,10 @@ static void __init do_boot_cpu (int apicid) ...@@ -839,24 +833,10 @@ static void __init do_boot_cpu (int apicid)
*((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf; *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf;
Dprintk("3.\n"); Dprintk("3.\n");
/*
* Be paranoid about clearing APIC errors.
*/
if (!clustered_apic_mode && APIC_INTEGRATED(apic_version[apicid])) {
apic_read_around(APIC_SPIV);
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
}
/*
* Status is now clean
*/
boot_error = 0;
/* /*
* Starting actual IPI sequence... * Starting actual IPI sequence...
*/ */
wakeup_secondary_cpu(apicid, start_eip); boot_error = wakeup_secondary_cpu(apicid, start_eip);
if (!boot_error) { if (!boot_error) {
/* /*
...@@ -890,15 +870,12 @@ static void __init do_boot_cpu (int apicid) ...@@ -890,15 +870,12 @@ static void __init do_boot_cpu (int apicid)
else else
/* trampoline code not run */ /* trampoline code not run */
printk("Not responding.\n"); printk("Not responding.\n");
#if APIC_DEBUG inquire_remote_apic(apicid);
if (!clustered_apic_mode)
inquire_remote_apic(apicid);
#endif
} }
} }
if (boot_error) { if (boot_error) {
/* Try to put things back the way they were before ... */ /* Try to put things back the way they were before ... */
unmap_cpu_to_boot_apicid(cpu, apicid); unmap_cpu_to_logical_apicid(cpu);
clear_bit(cpu, &cpu_callout_map); /* was set here (do_boot_cpu()) */ clear_bit(cpu, &cpu_callout_map); /* was set here (do_boot_cpu()) */
clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */ clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
cpucount--; cpucount--;
...@@ -907,11 +884,7 @@ static void __init do_boot_cpu (int apicid) ...@@ -907,11 +884,7 @@ static void __init do_boot_cpu (int apicid)
/* mark "stuck" area as not stuck */ /* mark "stuck" area as not stuck */
*((volatile unsigned long *)trampoline_base) = 0; *((volatile unsigned long *)trampoline_base) = 0;
if(clustered_apic_mode) { return boot_error;
printk("Restoring NMI vector\n");
*((volatile unsigned short *) TRAMPOLINE_HIGH) = nmi_high;
*((volatile unsigned short *) TRAMPOLINE_LOW) = nmi_low;
}
} }
cycles_t cacheflush_time; cycles_t cacheflush_time;
...@@ -987,8 +960,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus) ...@@ -987,8 +960,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
prof_multiplier[cpu] = 1; prof_multiplier[cpu] = 1;
} }
init_cpu_to_apicid();
/* /*
* Setup boot CPU information * Setup boot CPU information
*/ */
...@@ -997,7 +968,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus) ...@@ -997,7 +968,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
print_cpu_info(&cpu_data[0]); print_cpu_info(&cpu_data[0]);
boot_cpu_logical_apicid = logical_smp_processor_id(); boot_cpu_logical_apicid = logical_smp_processor_id();
map_cpu_to_boot_apicid(0, boot_cpu_apicid);
current_thread_info()->cpu = 0; current_thread_info()->cpu = 0;
smp_tune_scheduling(); smp_tune_scheduling();
...@@ -1021,10 +991,9 @@ static void __init smp_boot_cpus(unsigned int max_cpus) ...@@ -1021,10 +991,9 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
* CPU too, but we do it for the sake of robustness anyway. * CPU too, but we do it for the sake of robustness anyway.
* Makes no sense to do this check in clustered apic mode, so skip it * Makes no sense to do this check in clustered apic mode, so skip it
*/ */
if (!clustered_apic_mode && if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
!test_bit(boot_cpu_physical_apicid, &phys_cpu_present_map)) {
printk("weird, boot CPU (#%d) not listed by the BIOS.\n", printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
boot_cpu_physical_apicid); boot_cpu_physical_apicid);
phys_cpu_present_map |= (1 << hard_smp_processor_id()); phys_cpu_present_map |= (1 << hard_smp_processor_id());
} }
...@@ -1055,6 +1024,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) ...@@ -1055,6 +1024,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
connect_bsp_APIC(); connect_bsp_APIC();
setup_local_APIC(); setup_local_APIC();
map_cpu_to_logical_apicid();
if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid) if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid)
BUG(); BUG();
...@@ -1083,13 +1053,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) ...@@ -1083,13 +1053,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
if (max_cpus <= cpucount+1) if (max_cpus <= cpucount+1)
continue; continue;
do_boot_cpu(apicid); if (do_boot_cpu(apicid))
/*
* Make sure we unmap all failed CPUs
*/
if ((boot_apicid_to_cpu(apicid) == -1) &&
(phys_cpu_present_map & (1 << bit)))
printk("CPU #%d not responding - cannot use it.\n", printk("CPU #%d not responding - cannot use it.\n",
apicid); apicid);
} }
......
...@@ -28,7 +28,6 @@ ...@@ -28,7 +28,6 @@
#include <asm/mtrr.h> #include <asm/mtrr.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/smpboot.h>
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/arch_hooks.h> #include <asm/arch_hooks.h>
......
...@@ -127,7 +127,7 @@ static int __init pci_numa_init(void) ...@@ -127,7 +127,7 @@ static int __init pci_numa_init(void)
return 0; return 0;
pci_root_bus = pcibios_scan_root(0); pci_root_bus = pcibios_scan_root(0);
if (clustered_apic_mode && (numnodes > 1)) { if (numnodes > 1) {
for (quad = 1; quad < numnodes; ++quad) { for (quad = 1; quad < numnodes; ++quad) {
printk("Scanning PCI bus %d for quad %d\n", printk("Scanning PCI bus %d for quad %d\n",
QUADLOCAL2BUS(quad,0), quad); QUADLOCAL2BUS(quad,0), quad);
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#endif #endif
#define no_balance_irq (0) #define no_balance_irq (0)
#define esr_disable (0)
#define APIC_BROADCAST_ID 0x0F #define APIC_BROADCAST_ID 0x0F
#define check_apicid_used(bitmap, apicid) (bitmap & (1 << apicid)) #define check_apicid_used(bitmap, apicid) (bitmap & (1 << apicid))
...@@ -53,6 +54,11 @@ static inline int multi_timer_check(int apic, int irq) ...@@ -53,6 +54,11 @@ static inline int multi_timer_check(int apic, int irq)
return 0; return 0;
} }
/* Flat (non-clustered) APIC mode has no NUMA topology: all CPUs are node 0. */
static inline int apicid_to_node(int logical_apicid)
{
	return 0;
}
static inline int cpu_present_to_apicid(int mps_cpu) static inline int cpu_present_to_apicid(int mps_cpu)
{ {
return mps_cpu; return mps_cpu;
...@@ -73,10 +79,13 @@ static inline int mpc_apic_id(struct mpc_config_processor *m, int quad) ...@@ -73,10 +79,13 @@ static inline int mpc_apic_id(struct mpc_config_processor *m, int quad)
return (m->mpc_apicid); return (m->mpc_apicid);
} }
#define WAKE_SECONDARY_VIA_INIT
static inline void setup_portio_remap(void) static inline void setup_portio_remap(void)
{ {
} }
/*
 * Boot-time sanity check for flat APIC mode: nonzero iff the boot CPU's
 * physical APIC ID is set in phys_cpu_present_map (built from the BIOS MP
 * tables).  The clustered subarchitectures stub this out to always-true.
 */
static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
return test_bit(boot_cpu_physical_apicid, &phys_cpu_present_map);
}
#endif /* __ASM_MACH_APIC_H */ #endif /* __ASM_MACH_APIC_H */
#ifndef __ASM_MACH_WAKECPU_H
#define __ASM_MACH_WAKECPU_H
/*
* This file copes with machines that wakeup secondary CPUs by the
* INIT, INIT, STARTUP sequence.
*/
#define WAKE_SECONDARY_VIA_INIT
#define TRAMPOLINE_LOW phys_to_virt(0x467)
#define TRAMPOLINE_HIGH phys_to_virt(0x469)
#define boot_cpu_apicid boot_cpu_physical_apicid
/*
 * Busy-wait until the boot CPU deasserts INIT, i.e. until *deassert
 * (init_deasserted in smpboot.c) becomes nonzero.  Accessing the local
 * APIC before this point would lock up on INIT-based wakeup platforms.
 */
static inline void wait_for_init_deassert(atomic_t *deassert)
{
	do {
		/* spin */
	} while (atomic_read(deassert) == 0);
}
/*
 * Nothing to do for most platforms, since the local APIC state is already
 * cleared by the INIT cycle.  (The NMI-wakeup variant must kick the APIC
 * here instead.)
 */
static inline void smp_callin_clear_local_apic(void)
{
}
/* No-op: the INIT/STARTUP wakeup path never touches the NMI trampoline
 * vector, so there is nothing to save here. */
static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
{
}
/* No-op counterpart of store_NMI_vector(): nothing was saved, so nothing
 * needs restoring on INIT/STARTUP platforms. */
static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
{
}
#if APIC_DEBUG
#define inquire_remote_apic(apicid) __inquire_remote_apic(apicid)
#else
#define inquire_remote_apic(apicid) {}
#endif
#endif /* __ASM_MACH_WAKECPU_H */
#ifndef __ASM_MACH_APIC_H #ifndef __ASM_MACH_APIC_H
#define __ASM_MACH_APIC_H #define __ASM_MACH_APIC_H
#define APIC_DFR_VALUE (APIC_DFR_FLAT) #define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
#define TARGET_CPUS (0xf) #define TARGET_CPUS (0xf)
#define no_balance_irq (1) #define no_balance_irq (1)
#define esr_disable (1)
#define APIC_BROADCAST_ID 0x0F #define APIC_BROADCAST_ID 0x0F
#define check_apicid_used(bitmap, apicid) (bitmap & (1 << apicid)) #define check_apicid_used(bitmap, apicid) ((bitmap) & (1 << (apicid)))
static inline int apic_id_registered(void) static inline int apic_id_registered(void)
{ {
...@@ -26,6 +27,10 @@ static inline void clustered_apic_check(void) ...@@ -26,6 +27,10 @@ static inline void clustered_apic_check(void)
"NUMA-Q", nr_ioapics); "NUMA-Q", nr_ioapics);
} }
/*
* Skip adding the timer int on secondary nodes, which causes
* a small but painful rift in the time-space continuum.
*/
static inline int multi_timer_check(int apic, int irq) static inline int multi_timer_check(int apic, int irq)
{ {
return (apic != 0 && irq == 0); return (apic != 0 && irq == 0);
...@@ -47,14 +52,14 @@ static inline int generate_logical_apicid(int quad, int phys_apicid) ...@@ -47,14 +52,14 @@ static inline int generate_logical_apicid(int quad, int phys_apicid)
return ( (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1) ); return ( (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1) );
} }
static inline int apicid_to_quad(int logical_apicid) static inline int apicid_to_node(int logical_apicid)
{ {
return (logical_apicid >> 4); return (logical_apicid >> 4);
} }
static inline unsigned long apicid_to_cpu_present(int logical_apicid) static inline unsigned long apicid_to_cpu_present(int logical_apicid)
{ {
return ( (logical_apicid&0xf) << (4*apicid_to_quad(logical_apicid)) ); return ( (logical_apicid&0xf) << (4*apicid_to_node(logical_apicid)) );
} }
static inline int mpc_apic_id(struct mpc_config_processor *m, int quad) static inline int mpc_apic_id(struct mpc_config_processor *m, int quad)
...@@ -69,8 +74,6 @@ static inline int mpc_apic_id(struct mpc_config_processor *m, int quad) ...@@ -69,8 +74,6 @@ static inline int mpc_apic_id(struct mpc_config_processor *m, int quad)
return logical_apicid; return logical_apicid;
} }
#define WAKE_SECONDARY_VIA_NMI
static inline void setup_portio_remap(void) static inline void setup_portio_remap(void)
{ {
if (numnodes <= 1) if (numnodes <= 1)
...@@ -82,4 +85,9 @@ static inline void setup_portio_remap(void) ...@@ -82,4 +85,9 @@ static inline void setup_portio_remap(void)
(u_long) xquad_portio, (u_long) numnodes*XQUAD_PORTIO_QUAD); (u_long) xquad_portio, (u_long) numnodes*XQUAD_PORTIO_QUAD);
} }
/*
 * NUMA-Q bootstraps CPUs via logical APIC IDs and NMI wakeup, so the
 * flat-mode "boot APIC ID listed in the MP tables" check does not apply:
 * report the boot CPU as present unconditionally.
 */
static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return 1;
}
#endif /* __ASM_MACH_APIC_H */ #endif /* __ASM_MACH_APIC_H */
#ifndef __ASM_MACH_WAKECPU_H
#define __ASM_MACH_WAKECPU_H
/* This file copes with machines that wakeup secondary CPUs by NMIs */
#define WAKE_SECONDARY_VIA_NMI
#define TRAMPOLINE_LOW phys_to_virt(0x8)
#define TRAMPOLINE_HIGH phys_to_virt(0xa)
#define boot_cpu_apicid boot_cpu_logical_apicid
/*
 * We don't do anything here because we use NMIs to boot instead, so
 * there is no INIT deassert for the secondary CPU to wait on.
 */
static inline void wait_for_init_deassert(atomic_t *deassert)
{
}
/*
 * Because we use NMIs rather than the INIT-STARTUP sequence to
 * bootstrap the CPUs, the APIC may be in a weird state.  Kick it:
 * wipe any stale local APIC state before setup_local_APIC() runs.
 */
static inline void smp_callin_clear_local_apic(void)
{
clear_local_APIC();
}
/*
 * Stash the current NMI trampoline vector words (low-memory locations
 * TRAMPOLINE_HIGH/TRAMPOLINE_LOW) into *high/*low so the NMI-based
 * wakeup can be undone later by restore_NMI_vector().
 */
static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
{
printk("Storing NMI vector\n");
*high = *((volatile unsigned short *) TRAMPOLINE_HIGH);
*low = *((volatile unsigned short *) TRAMPOLINE_LOW);
}
/*
 * Put the NMI trampoline vector back the way store_NMI_vector() found it,
 * writing *high/*low back to TRAMPOLINE_HIGH/TRAMPOLINE_LOW.
 */
static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
{
printk("Restoring NMI vector\n");
*((volatile unsigned short *) TRAMPOLINE_HIGH) = *high;
*((volatile unsigned short *) TRAMPOLINE_LOW) = *low;
}
#define inquire_remote_apic(apicid) {}
#endif /* __ASM_MACH_WAKECPU_H */
...@@ -3,6 +3,8 @@ ...@@ -3,6 +3,8 @@
extern int x86_summit; extern int x86_summit;
#define esr_disable (1)
#define XAPIC_DEST_CPUS_MASK 0x0Fu #define XAPIC_DEST_CPUS_MASK 0x0Fu
#define XAPIC_DEST_CLUSTER_MASK 0xF0u #define XAPIC_DEST_CLUSTER_MASK 0xF0u
...@@ -32,6 +34,11 @@ static inline void clustered_apic_check(void) ...@@ -32,6 +34,11 @@ static inline void clustered_apic_check(void)
(x86_summit ? "Summit" : "Flat"), nr_ioapics); (x86_summit ? "Summit" : "Flat"), nr_ioapics);
} }
/*
 * Summit: bits [7:5] of the logical APIC ID select the node —
 * two cluster IDs per CEC.
 */
static inline int apicid_to_node(int logical_apicid)
{
	return logical_apicid >> 5;
}
static inline int cpu_present_to_apicid(int mps_cpu) static inline int cpu_present_to_apicid(int mps_cpu)
{ {
if (x86_summit) if (x86_summit)
...@@ -54,10 +61,13 @@ static inline unsigned long apicid_to_phys_cpu_present(int apicid) ...@@ -54,10 +61,13 @@ static inline unsigned long apicid_to_phys_cpu_present(int apicid)
return (1ul << apicid); return (1ul << apicid);
} }
#define WAKE_SECONDARY_VIA_INIT
static inline void setup_portio_remap(void) static inline void setup_portio_remap(void)
{ {
} }
/*
 * Summit uses clustered logical addressing, so the flat-mode MP-table
 * presence check is skipped: the boot CPU is always reported present.
 */
static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
	return 1;
}
#endif /* __ASM_MACH_APIC_H */ #endif /* __ASM_MACH_APIC_H */
...@@ -28,8 +28,6 @@ ...@@ -28,8 +28,6 @@
#ifdef CONFIG_X86_NUMAQ #ifdef CONFIG_X86_NUMAQ
#include <asm/smpboot.h>
/* /*
* for now assume that 64Gb is max amount of RAM for whole system * for now assume that 64Gb is max amount of RAM for whole system
* 64Gb / 4096bytes/page = 16777216 pages * 64Gb / 4096bytes/page = 16777216 pages
......
...@@ -22,22 +22,12 @@ ...@@ -22,22 +22,12 @@
#endif #endif
#endif #endif
#ifdef CONFIG_CLUSTERED_APIC #ifdef CONFIG_X86_NUMAQ
#define INT_DELIVERY_MODE 0 /* physical delivery on LOCAL quad */ #define INT_DELIVERY_MODE 0 /* physical delivery on LOCAL quad */
#else #else
#define INT_DELIVERY_MODE 1 /* logical delivery broadcast to all procs */ #define INT_DELIVERY_MODE 1 /* logical delivery broadcast to all procs */
#endif #endif
#ifndef clustered_apic_mode
#ifdef CONFIG_CLUSTERED_APIC
#define clustered_apic_mode (1)
#define esr_disable (1)
#else /* !CONFIG_CLUSTERED_APIC */
#define clustered_apic_mode (0)
#define esr_disable (0)
#endif /* CONFIG_CLUSTERED_APIC */
#endif
#define BAD_APICID 0xFFu #define BAD_APICID 0xFFu
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
...@@ -62,15 +52,7 @@ extern void smp_invalidate_rcv(void); /* Process an NMI */ ...@@ -62,15 +52,7 @@ extern void smp_invalidate_rcv(void); /* Process an NMI */
extern void (*mtrr_hook) (void); extern void (*mtrr_hook) (void);
extern void zap_low_mappings (void); extern void zap_low_mappings (void);
/*
* Some lowlevel functions might want to know about
* the real APIC ID <-> CPU # mapping.
*/
#define MAX_APICID 256 #define MAX_APICID 256
extern volatile int cpu_to_physical_apicid[NR_CPUS];
extern volatile int physical_apicid_to_cpu[MAX_APICID];
extern volatile int cpu_to_logical_apicid[NR_CPUS];
extern volatile int logical_apicid_to_cpu[MAX_APICID];
/* /*
* This function is needed by all SMP systems. It must _always_ be valid * This function is needed by all SMP systems. It must _always_ be valid
...@@ -100,6 +82,15 @@ static inline int num_booting_cpus(void) ...@@ -100,6 +82,15 @@ static inline int num_booting_cpus(void)
return hweight32(cpu_callout_map); return hweight32(cpu_callout_map);
} }
/* Mapping from cpu number to logical apicid */
extern volatile u8 cpu_2_logical_apicid[];
/*
 * Return the logical APIC ID recorded for @cpu by
 * map_cpu_to_logical_apicid(); BAD_APICID if the cpu was never mapped
 * (the array is initialized to BAD_APICID and reset on unmap).
 */
static inline int cpu_to_logical_apicid(int cpu)
{
return (int)cpu_2_logical_apicid[cpu];
}
extern void map_cpu_to_logical_apicid(void);
extern void unmap_cpu_to_logical_apicid(int cpu);
extern inline int any_online_cpu(unsigned int mask) extern inline int any_online_cpu(unsigned int mask)
{ {
if (mask & cpu_online_map) if (mask & cpu_online_map)
......
#ifndef __ASM_SMPBOOT_H
#define __ASM_SMPBOOT_H
#ifndef clustered_apic_mode
#ifdef CONFIG_CLUSTERED_APIC
#define clustered_apic_mode (1)
#else /* !CONFIG_CLUSTERED_APIC */
#define clustered_apic_mode (0)
#endif /* CONFIG_CLUSTERED_APIC */
#endif
#ifdef CONFIG_CLUSTERED_APIC
#define TRAMPOLINE_LOW phys_to_virt(0x8)
#define TRAMPOLINE_HIGH phys_to_virt(0xa)
#else /* !CONFIG_CLUSTERED_APIC */
#define TRAMPOLINE_LOW phys_to_virt(0x467)
#define TRAMPOLINE_HIGH phys_to_virt(0x469)
#endif /* CONFIG_CLUSTERED_APIC */
#ifdef CONFIG_CLUSTERED_APIC
#define boot_cpu_apicid boot_cpu_logical_apicid
#else /* !CONFIG_CLUSTERED_APIC */
#define boot_cpu_apicid boot_cpu_physical_apicid
#endif /* CONFIG_CLUSTERED_APIC */
/*
* Mappings between logical cpu number and logical / physical apicid
* The first four macros are trivial, but it keeps the abstraction consistent
*/
extern volatile int logical_apicid_2_cpu[];
extern volatile int cpu_2_logical_apicid[];
extern volatile int physical_apicid_2_cpu[];
extern volatile int cpu_2_physical_apicid[];
#define logical_apicid_to_cpu(apicid) logical_apicid_2_cpu[apicid]
#define cpu_to_logical_apicid(cpu) cpu_2_logical_apicid[cpu]
#define physical_apicid_to_cpu(apicid) physical_apicid_2_cpu[apicid]
#define cpu_to_physical_apicid(cpu) cpu_2_physical_apicid[cpu]
#ifdef CONFIG_CLUSTERED_APIC /* use logical IDs to bootstrap */
#define boot_apicid_to_cpu(apicid) logical_apicid_2_cpu[apicid]
#define cpu_to_boot_apicid(cpu) cpu_2_logical_apicid[cpu]
#else /* !CONFIG_CLUSTERED_APIC */ /* use physical IDs to bootstrap */
#define boot_apicid_to_cpu(apicid) physical_apicid_2_cpu[apicid]
#define cpu_to_boot_apicid(cpu) cpu_2_physical_apicid[cpu]
#endif /* CONFIG_CLUSTERED_APIC */
#endif
...@@ -27,12 +27,17 @@ ...@@ -27,12 +27,17 @@
#ifndef _ASM_I386_TOPOLOGY_H #ifndef _ASM_I386_TOPOLOGY_H
#define _ASM_I386_TOPOLOGY_H #define _ASM_I386_TOPOLOGY_H
#ifdef CONFIG_X86_NUMAQ #ifdef CONFIG_NUMA
#include <asm/smpboot.h> /* Mappings between logical cpu number and node number */
extern volatile unsigned long node_2_cpu_mask[];
extern volatile int cpu_2_node[];
/* Returns the number of the node containing CPU 'cpu' */ /* Returns the number of the node containing CPU 'cpu' */
#define __cpu_to_node(cpu) (cpu_to_logical_apicid(cpu) >> 4) static inline int __cpu_to_node(int cpu)
{
return cpu_2_node[cpu];
}
/* Returns the number of the node containing MemBlk 'memblk' */ /* Returns the number of the node containing MemBlk 'memblk' */
#define __memblk_to_node(memblk) (memblk) #define __memblk_to_node(memblk) (memblk)
...@@ -41,49 +46,22 @@ ...@@ -41,49 +46,22 @@
so it is a pretty simple function! */ so it is a pretty simple function! */
#define __parent_node(node) (node) #define __parent_node(node) (node)
/* Returns the number of the first CPU on Node 'node'. /* Returns a bitmask of CPUs on Node 'node'. */
* This should be changed to a set of cached values
* but this will do for now.
*/
static inline int __node_to_first_cpu(int node)
{
int i, cpu, logical_apicid = node << 4;
for(i = 1; i < 16; i <<= 1)
/* check to see if the cpu is in the system */
if ((cpu = logical_apicid_to_cpu(logical_apicid | i)) >= 0)
/* if yes, return it to caller */
return cpu;
BUG(); /* couldn't find a cpu on given node */
return -1;
}
/* Returns a bitmask of CPUs on Node 'node'.
* This should be changed to a set of cached bitmasks
* but this will do for now.
*/
static inline unsigned long __node_to_cpu_mask(int node) static inline unsigned long __node_to_cpu_mask(int node)
{ {
int i, cpu, logical_apicid = node << 4; return node_2_cpu_mask[node];
unsigned long mask = 0UL; }
if (sizeof(unsigned long) * 8 < NR_CPUS)
BUG();
for(i = 1; i < 16; i <<= 1)
/* check to see if the cpu is in the system */
if ((cpu = logical_apicid_to_cpu(logical_apicid | i)) >= 0)
/* if yes, add to bitmask */
mask |= 1 << cpu;
return mask; /* Returns the number of the first CPU on Node 'node'. */
static inline int __node_to_first_cpu(int node)
{
return __ffs(__node_to_cpu_mask(node));
} }
/* Returns the number of the first MemBlk on Node 'node' */ /* Returns the number of the first MemBlk on Node 'node' */
#define __node_to_memblk(node) (node) #define __node_to_memblk(node) (node)
#else /* !CONFIG_X86_NUMAQ */ #else /* !CONFIG_NUMA */
/* /*
* Other i386 platforms should define their own version of the * Other i386 platforms should define their own version of the
* above macros here. * above macros here.
...@@ -91,6 +69,6 @@ static inline unsigned long __node_to_cpu_mask(int node) ...@@ -91,6 +69,6 @@ static inline unsigned long __node_to_cpu_mask(int node)
#include <asm-generic/topology.h> #include <asm-generic/topology.h>
#endif /* CONFIG_X86_NUMAQ */ #endif /* CONFIG_NUMA */
#endif /* _ASM_I386_TOPOLOGY_H */ #endif /* _ASM_I386_TOPOLOGY_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment