Commit 9466d603 authored by Ingo Molnar

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/travis/linux-2.6-cpus4096-for-ingo into cpus4096

parents 1f3f424a 83b19597
@@ -591,19 +591,20 @@ config IOMMU_HELPER
 config MAXSMP
 	bool "Configure Maximum number of SMP Processors and NUMA Nodes"
-	depends on X86_64 && SMP && BROKEN
+	depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
+	select CPUMASK_OFFSTACK
 	default n
 	help
 	  Configure maximum number of CPUS and NUMA Nodes for this architecture.
 	  If unsure, say N.

 config NR_CPUS
-	int "Maximum number of CPUs (2-512)" if !MAXSMP
-	range 2 512
-	depends on SMP
+	int "Maximum number of CPUs" if SMP && !MAXSMP
+	range 2 512 if SMP && !MAXSMP
+	default "1" if !SMP
 	default "4096" if MAXSMP
-	default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
-	default "8"
+	default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000)
+	default "8" if SMP
 	help
 	  This allows you to specify the maximum number of CPUs which this
 	  kernel will support. The maximum supported value is 512 and the

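/*
 * Context sketch, not part of the commit above: with NR_CPUS=4096 a cpumask_t
 * is 512 bytes, too large to live on the kernel stack, which is why MAXSMP now
 * selects CPUMASK_OFFSTACK. A minimal userspace model of the on-stack vs.
 * off-stack representation; all names and sizes below are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

#define MODEL_NR_CPUS 4096
#define MODEL_BITS_PER_LONG (8 * sizeof(unsigned long))

typedef struct { unsigned long bits[MODEL_NR_CPUS / MODEL_BITS_PER_LONG]; } model_cpumask_t;
typedef model_cpumask_t *model_cpumask_var_t;	/* "off-stack" flavour: just a pointer */

static int model_alloc_cpumask_var(model_cpumask_var_t *mask)
{
	*mask = calloc(1, sizeof(model_cpumask_t));	/* heap, not stack */
	return *mask != NULL;
}

int main(void)
{
	model_cpumask_var_t mask;

	printf("sizeof(model_cpumask_t) = %zu bytes\n", sizeof(model_cpumask_t));
	if (!model_alloc_cpumask_var(&mask))
		return 1;
	mask->bits[0] |= 1UL;				/* mark CPU 0 */
	printf("cpu0 set: %lu\n", mask->bits[0] & 1UL);
	free(mask);
	return 0;
}
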
@@ -9,12 +9,12 @@ static inline int apic_id_registered(void)
 	return (1);
 }

-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return cpu_online_map;
+	return &cpu_online_map;
 #else
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 #endif
 }
@@ -79,7 +79,7 @@ static inline int apicid_to_node(int logical_apicid)
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS)
+	if (mps_cpu < nr_cpu_ids)
 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	return BAD_APICID;
@@ -94,7 +94,7 @@ extern u8 cpu_2_logical_apicid[];
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return cpu_physical_id(cpu);
 }
@@ -119,16 +119,32 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 }

 /* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 	int apicid;

-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	return apicid;
 }

+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
+{
+	int cpu;
+
+	/*
+	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
+	 * May as well be the first.
+	 */
+	cpu = cpumask_any_and(cpumask, andmask);
+	if (cpu < nr_cpu_ids)
+		return cpu_to_logical_apicid(cpu);
+	return BAD_APICID;
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
 	return cpuid_apic >> index_msb;

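/*
 * Context sketch, not part of the commit above: the new cpu_mask_to_apicid_and()
 * variants pick the first CPU present in both masks (cpumask_any_and) instead of
 * building a temporary intersection mask. A userspace model of that selection,
 * with a mask modelled as a single unsigned long; names are illustrative only.
 */
#include <stdio.h>

#define MODEL_BAD_APICID 0xFFu
#define MODEL_NR_CPU_IDS 8

static unsigned int model_any_and(unsigned long a, unsigned long b)
{
	unsigned long both = a & b;
	unsigned int cpu;

	for (cpu = 0; cpu < MODEL_NR_CPU_IDS; cpu++)
		if (both & (1UL << cpu))
			return cpu;		/* first CPU in both masks */
	return MODEL_NR_CPU_IDS;		/* nothing in common */
}

int main(void)
{
	unsigned long cpumask = 0x0c;		/* CPUs 2,3 */
	unsigned long andmask = 0x0a;		/* CPUs 1,3 */
	unsigned int cpu = model_any_and(cpumask, andmask);

	printf("%u\n", cpu < MODEL_NR_CPU_IDS ? cpu : MODEL_BAD_APICID);
	return 0;
}
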
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H

-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);

-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }

 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }

 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }

 #endif /* __ASM_MACH_IPI_H */

@@ -9,14 +9,14 @@ static inline int apic_id_registered(void)
 	return (1);
 }

-static inline cpumask_t target_cpus_cluster(void)
+static inline const cpumask_t *target_cpus_cluster(void)
 {
-	return CPU_MASK_ALL;
+	return &CPU_MASK_ALL;
 }

-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-	return cpumask_of_cpu(smp_processor_id());
+	return &cpumask_of_cpu(smp_processor_id());
 }

 #define APIC_DFR_VALUE_CLUSTER		(APIC_DFR_CLUSTER)
@@ -80,9 +80,10 @@ extern int apic_version [MAX_APICS];
 static inline void setup_apic_routing(void)
 {
 	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
 	printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
-		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
+		"Physical Cluster" : "Logical Cluster",
+		nr_ioapics, cpus_addr(*target_cpus())[0]);
 }

 static inline int multi_timer_check(int apic, int irq)
@@ -100,7 +101,7 @@ static inline int cpu_present_to_apicid(int mps_cpu)
 {
 	if (!mps_cpu)
 		return boot_cpu_physical_apicid;
-	else if (mps_cpu < NR_CPUS)
+	else if (mps_cpu < nr_cpu_ids)
 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
@@ -120,9 +121,9 @@ extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
 #ifdef CONFIG_SMP
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 #else
 	return logical_smp_processor_id();
 #endif
@@ -146,14 +147,15 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid)
 	return (1);
 }

-static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
+static inline unsigned int
+cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;

-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpumask_weight(cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return 0xFF;
@@ -161,10 +163,10 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster.  If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = cpumask_first(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -179,14 +181,14 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
 	return apicid;
 }

-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;

-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return cpu_to_logical_apicid(0);
@@ -194,10 +196,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster.  If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpu_isset(cpu, *cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -212,6 +214,54 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	return apicid;
 }

+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
+{
+	int num_bits_set;
+	int num_bits_set2;
+	int cpus_found = 0;
+	int cpu;
+	int apicid = 0;
+
+	num_bits_set = cpumask_weight(cpumask);
+	num_bits_set2 = cpumask_weight(andmask);
+	num_bits_set = min(num_bits_set, num_bits_set2);
+	/* Return id to all */
+	if (num_bits_set >= nr_cpu_ids)
+#if defined CONFIG_ES7000_CLUSTERED_APIC
+		return 0xFF;
+#else
+		return cpu_to_logical_apicid(0);
+#endif
+	/*
+	 * The cpus in the mask must all be on the apic cluster.  If are not
+	 * on the same apicid cluster return default value of TARGET_CPUS.
+	 */
+	cpu = cpumask_first_and(cpumask, andmask);
+	apicid = cpu_to_logical_apicid(cpu);
+	while (cpus_found < num_bits_set) {
+		if (cpumask_test_cpu(cpu, cpumask) &&
+		    cpumask_test_cpu(cpu, andmask)) {
+			int new_apicid = cpu_to_logical_apicid(cpu);
+			if (apicid_cluster(apicid) !=
+					apicid_cluster(new_apicid)) {
+				printk(KERN_WARNING
+					"%s: Not a valid mask!\n", __func__);
+#if defined CONFIG_ES7000_CLUSTERED_APIC
+				return 0xFF;
+#else
+				return cpu_to_logical_apicid(0);
+#endif
+			}
+			apicid = new_apicid;
+			cpus_found++;
+		}
+		cpu++;
+	}
+	return apicid;
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
 	return cpuid_apic >> index_msb;

 #ifndef __ASM_ES7000_IPI_H
 #define __ASM_ES7000_IPI_H

-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);

-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }

 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }

 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }

 #endif /* __ASM_ES7000_IPI_H */

@@ -24,7 +24,7 @@ struct genapic {
 	int (*probe)(void);

 	int (*apic_id_registered)(void);
-	cpumask_t (*target_cpus)(void);
+	const struct cpumask *(*target_cpus)(void);
 	int int_delivery_mode;
 	int int_dest_mode;
 	int ESR_DISABLE;
@@ -57,12 +57,16 @@ struct genapic {
 	unsigned (*get_apic_id)(unsigned long x);
 	unsigned long apic_id_mask;
-	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
-	cpumask_t (*vector_allocation_domain)(int cpu);
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+					       const struct cpumask *andmask);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);

 #ifdef CONFIG_SMP
 	/* ipi */
-	void (*send_IPI_mask)(cpumask_t mask, int vector);
+	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+					 int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 #endif
@@ -114,6 +118,7 @@ struct genapic {
 	APICFUNC(get_apic_id)				\
 	.apic_id_mask = APIC_ID_MASK,			\
 	APICFUNC(cpu_mask_to_apicid)			\
+	APICFUNC(cpu_mask_to_apicid_and)		\
 	APICFUNC(vector_allocation_domain)		\
 	APICFUNC(acpi_madt_oem_check)			\
 	IPIFUNC(send_IPI_mask)				\

 #ifndef _ASM_X86_GENAPIC_64_H
 #define _ASM_X86_GENAPIC_64_H

+#include <linux/cpumask.h>
+
 /*
  * Copyright 2004 James Cleverdon, IBM.
  * Subject to the GNU Public License, v.2
@@ -18,16 +20,20 @@ struct genapic {
 	u32 int_delivery_mode;
 	u32 int_dest_mode;
 	int (*apic_id_registered)(void);
-	cpumask_t (*target_cpus)(void);
-	cpumask_t (*vector_allocation_domain)(int cpu);
+	const struct cpumask *(*target_cpus)(void);
+	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 	void (*init_apic_ldr)(void);
 	/* ipi */
-	void (*send_IPI_mask)(cpumask_t mask, int vector);
+	void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+	void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+					 int vector);
 	void (*send_IPI_allbutself)(int vector);
 	void (*send_IPI_all)(int vector);
 	void (*send_IPI_self)(int vector);
 	/* */
-	unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
+	unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+	unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+					       const struct cpumask *andmask);
 	unsigned int (*phys_pkg_id)(int index_msb);
 	unsigned int (*get_apic_id)(unsigned long x);
 	unsigned long (*set_apic_id)(unsigned int id);

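/*
 * Context sketch, not part of the commit above: vector_allocation_domain()
 * changes from returning a cpumask_t by value to filling a caller-supplied
 * mask, so a potentially 512-byte structure never travels through the return
 * value. A userspace model of the output-parameter style; the struct layout
 * and names below are illustrative only.
 */
#include <stdio.h>
#include <string.h>

struct model_cpumask { unsigned long bits[64]; };

/* old style: returns the whole structure by value */
static struct model_cpumask old_domain(int cpu)
{
	struct model_cpumask m;

	memset(&m, 0, sizeof(m));
	m.bits[0] = 1UL << cpu;
	return m;			/* copies sizeof(m) bytes back */
}

/* new style: the caller owns the storage, the callee only fills it */
static void new_domain(int cpu, struct model_cpumask *retmask)
{
	memset(retmask, 0, sizeof(*retmask));
	retmask->bits[0] = 1UL << cpu;
}

int main(void)
{
	struct model_cpumask a = old_domain(3), b;

	new_domain(3, &b);
	printf("%d\n", memcmp(&a, &b, sizeof(a)) == 0);	/* both describe CPU 3 */
	return 0;
}
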
@@ -117,7 +117,8 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
 	native_apic_mem_write(APIC_ICR, cfg);
 }

-static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
+static inline void send_IPI_mask_sequence(const struct cpumask *mask,
+					  int vector)
 {
 	unsigned long flags;
 	unsigned long query_cpu;
@@ -128,11 +129,29 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
 	 * - mbligh
 	 */
 	local_irq_save(flags);
-	for_each_cpu_mask_nr(query_cpu, mask) {
+	for_each_cpu(query_cpu, mask) {
 		__send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
 				      vector, APIC_DEST_PHYSICAL);
 	}
 	local_irq_restore(flags);
 }

+static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
+					    int vector)
+{
+	unsigned long flags;
+	unsigned int query_cpu;
+	unsigned int this_cpu = smp_processor_id();
+
+	/* See Hack comment above */
+
+	local_irq_save(flags);
+	for_each_cpu(query_cpu, mask)
+		if (query_cpu != this_cpu)
+			__send_IPI_dest_field(
+				per_cpu(x86_cpu_to_apicid, query_cpu),
+				vector, APIC_DEST_PHYSICAL);
+	local_irq_restore(flags);
+}
+
 #endif /* _ASM_X86_IPI_H */

@@ -37,7 +37,7 @@ extern int irqbalance_disable(char *str);
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
-extern void fixup_irqs(cpumask_t map);
+extern void fixup_irqs(void);
 #endif

 extern unsigned int do_IRQ(struct pt_regs *regs);

@@ -8,12 +8,12 @@
 #define APIC_DFR_VALUE	(APIC_DFR_FLAT)

-static inline cpumask_t target_cpus(void)
+static inline const struct cpumask *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return cpu_online_map;
+	return cpu_online_mask;
 #else
-	return cpumask_of_cpu(0);
+	return cpumask_of(0);
 #endif
 }
@@ -28,6 +28,7 @@ static inline cpumask_t target_cpus(void)
 #define apic_id_registered (genapic->apic_id_registered)
 #define init_apic_ldr (genapic->init_apic_ldr)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
 #define phys_pkg_id (genapic->phys_pkg_id)
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
@@ -61,9 +62,18 @@ static inline int apic_id_registered(void)
 	return physid_isset(read_apic_id(), phys_cpu_present_map);
 }

-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return cpus_addr(cpumask)[0];
+	return cpumask_bits(cpumask)[0];
+}
+
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
+{
+	unsigned long mask1 = cpumask_bits(cpumask)[0];
+	unsigned long mask2 = cpumask_bits(andmask)[0];
+
+	return (unsigned int)(mask1 & mask2);
 }

 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
@@ -88,7 +98,7 @@ static inline int apicid_to_node(int logical_apicid)
 #endif
 }

-static inline cpumask_t vector_allocation_domain(int cpu)
+static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
 	/* Careful. Some cpus do not strictly honor the set of cpus
 	 * specified in the interrupt destination when using lowest
@@ -98,8 +108,7 @@ static inline cpumask_t vector_allocation_domain(int cpu)
 	 * deliver interrupts to the wrong hyperthread when only one
 	 * hyperthread was specified in the interrupt desitination.
 	 */
-	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-	return domain;
+	*retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
 }
 #endif
@@ -131,7 +140,7 @@ static inline int cpu_to_logical_apicid(int cpu)
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS && cpu_present(mps_cpu))
+	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
 		return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;

@@ -4,7 +4,8 @@
 /* Avoid include hell */
 #define NMI_VECTOR 0x02

-void send_IPI_mask_bitmask(cpumask_t mask, int vector);
+void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 void __send_IPI_shortcut(unsigned int shortcut, int vector);

 extern int no_broadcast;
@@ -12,28 +13,27 @@ extern int no_broadcast;
 #ifdef CONFIG_X86_64
 #include <asm/genapic.h>
 #define send_IPI_mask (genapic->send_IPI_mask)
+#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
 #else
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_bitmask(mask, vector);
 }
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 #endif

 static inline void __local_send_IPI_allbutself(int vector)
 {
-	if (no_broadcast || vector == NMI_VECTOR) {
-		cpumask_t mask = cpu_online_map;
-
-		cpu_clear(smp_processor_id(), mask);
-		send_IPI_mask(mask, vector);
-	} else
+	if (no_broadcast || vector == NMI_VECTOR)
+		send_IPI_mask_allbutself(cpu_online_mask, vector);
+	else
 		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
 }

 static inline void __local_send_IPI_all(int vector)
 {
 	if (no_broadcast || vector == NMI_VECTOR)
-		send_IPI_mask(cpu_online_map, vector);
+		send_IPI_mask(cpu_online_mask, vector);
 	else
 		__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
 }

@@ -24,6 +24,7 @@
 #define check_phys_apicid_present (genapic->check_phys_apicid_present)
 #define check_apicid_used (genapic->check_apicid_used)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define enable_apic_mode (genapic->enable_apic_mode)
 #define phys_pkg_id (genapic->phys_pkg_id)

@@ -7,9 +7,9 @@
 #define APIC_DFR_VALUE (APIC_DFR_CLUSTER)

-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-	return CPU_MASK_ALL;
+	return &CPU_MASK_ALL;
 }

 #define NO_BALANCE_IRQ (1)
@@ -122,7 +122,13 @@ static inline void enable_apic_mode(void)
  * We use physical apicids here, not logical, so just return the default
  * physical broadcast to stop people from breaking us
  */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+	return (int) 0xF;
+}
+
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
 {
 	return (int) 0xF;
 }

 #ifndef __ASM_NUMAQ_IPI_H
 #define __ASM_NUMAQ_IPI_H

-void send_IPI_mask_sequence(cpumask_t, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);

-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }

 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }

 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }

 #endif /* __ASM_NUMAQ_IPI_H */

@@ -60,7 +60,7 @@ struct smp_ops {
 	void (*cpu_die)(unsigned int cpu);
 	void (*play_dead)(void);

-	void (*send_call_func_ipi)(cpumask_t mask);
+	void (*send_call_func_ipi)(const struct cpumask *mask);
 	void (*send_call_func_single_ipi)(int cpu);
 };
@@ -125,7 +125,7 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 static inline void arch_send_call_function_ipi(cpumask_t mask)
 {
-	smp_ops.send_call_func_ipi(mask);
+	smp_ops.send_call_func_ipi(&mask);
 }

 void cpu_disable_common(void);
@@ -138,7 +138,7 @@ void native_cpu_die(unsigned int cpu);
 void native_play_dead(void);
 void play_dead_common(void);

-void native_send_call_func_ipi(cpumask_t mask);
+void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);

 extern void prefill_possible_map(void);

@@ -14,13 +14,13 @@
 #define APIC_DFR_VALUE	(APIC_DFR_CLUSTER)

-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 	/* CPU_MASK_ALL (0xff) has undefined behaviour with
 	 * dest_LowestPrio mode logical clustered apic interrupt routing
 	 * Just start on cpu 0.  IRQ balancing will spread load
 	 */
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 }

 #define INT_DELIVERY_MODE (dest_LowestPrio)
@@ -137,14 +137,14 @@ static inline void enable_apic_mode(void)
 {
 }

-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;

-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return (int) 0xFF;
@@ -152,10 +152,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster.  If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpu_isset(cpu, *cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -170,6 +170,45 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	return apicid;
 }

+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
+{
+	int num_bits_set;
+	int num_bits_set2;
+	int cpus_found = 0;
+	int cpu;
+	int apicid = 0;
+
+	num_bits_set = cpumask_weight(cpumask);
+	num_bits_set2 = cpumask_weight(andmask);
+	num_bits_set = min(num_bits_set, num_bits_set2);
+	/* Return id to all */
+	if (num_bits_set >= nr_cpu_ids)
+		return 0xFF;
+	/*
+	 * The cpus in the mask must all be on the apic cluster.  If are not
+	 * on the same apicid cluster return default value of TARGET_CPUS.
+	 */
+	cpu = cpumask_first_and(cpumask, andmask);
+	apicid = cpu_to_logical_apicid(cpu);
+	while (cpus_found < num_bits_set) {
+		if (cpumask_test_cpu(cpu, cpumask)
+		    && cpumask_test_cpu(cpu, andmask)) {
+			int new_apicid = cpu_to_logical_apicid(cpu);
+			if (apicid_cluster(apicid) !=
+					apicid_cluster(new_apicid)) {
+				printk(KERN_WARNING
+					"%s: Not a valid mask!\n", __func__);
+				return 0xFF;
+			}
+			apicid = apicid | new_apicid;
+			cpus_found++;
+		}
+		cpu++;
+	}
+	return apicid;
+}
+
 /* cpuid returns the value latched in the HW at reset, not the APIC ID
  * register's value.  For any box whose BIOS changes APIC IDs, like
  * clustered APIC systems, we must use hard_smp_processor_id.

 #ifndef __ASM_SUMMIT_IPI_H
 #define __ASM_SUMMIT_IPI_H

-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);

-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const cpumask_t *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
@@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector)
 	cpu_clear(smp_processor_id(), mask);

 	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+		send_IPI_mask(&mask, vector);
 }

 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(&cpu_online_map, vector);
 }

 #endif /* __ASM_SUMMIT_IPI_H */

@@ -226,6 +226,8 @@ extern cpumask_t cpu_coregroup_map(int cpu);
 #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
 #define topology_core_siblings(cpu)		(per_cpu(cpu_core_map, cpu))
 #define topology_thread_siblings(cpu)		(per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)		(&per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))

 /* indicates that pointers to the topology cpumask_t maps are valid */
 #define arch_provides_topology_pointers		yes

@@ -141,7 +141,7 @@ static int lapic_next_event(unsigned long delta,
 			    struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
 			      struct clock_event_device *evt);
-static void lapic_timer_broadcast(const struct cpumask *mask);
+static void lapic_timer_broadcast(const cpumask_t *mask);
 static void apic_pm_activate(void);

 /*
@@ -453,10 +453,10 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 /*
  * Local APIC timer broadcast function
  */
-static void lapic_timer_broadcast(const struct cpumask *mask)
+static void lapic_timer_broadcast(const cpumask_t *mask)
 {
 #ifdef CONFIG_SMP
-	send_IPI_mask(*mask, LOCAL_TIMER_VECTOR);
+	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
 #endif
 }
@@ -1903,8 +1903,8 @@ void __cpuinit generic_processor_info(int apicid, int version)
 	}
 #endif

-	cpu_set(cpu, cpu_possible_map);
-	cpu_set(cpu, cpu_present_map);
+	set_cpu_possible(cpu, true);
+	set_cpu_present(cpu, true);
 }

 #ifdef CONFIG_X86_64
@@ -2106,7 +2106,7 @@ __cpuinit int apic_is_clustered_box(void)
 	bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
 	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		/* are we being called early in kernel startup? */
 		if (bios_cpu_apicid) {
 			id = bios_cpu_apicid[i];

@@ -534,31 +534,16 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	per_cpu(cpuid4_info, cpu) = NULL;
 }

-static int __cpuinit detect_cache_attributes(unsigned int cpu)
+static void get_cpu_leaves(void *_retval)
 {
-	struct _cpuid4_info	*this_leaf;
-	unsigned long		j;
-	int			retval;
-	cpumask_t		oldmask;
-
-	if (num_cache_leaves == 0)
-		return -ENOENT;
-
-	per_cpu(cpuid4_info, cpu) = kzalloc(
-	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-	if (per_cpu(cpuid4_info, cpu) == NULL)
-		return -ENOMEM;
-
-	oldmask = current->cpus_allowed;
-	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-	if (retval)
-		goto out;
+	int j, *retval = _retval, cpu = smp_processor_id();

 	/* Do cpuid and store the results */
 	for (j = 0; j < num_cache_leaves; j++) {
+		struct _cpuid4_info *this_leaf;
 		this_leaf = CPUID4_INFO_IDX(cpu, j);
-		retval = cpuid4_cache_lookup(j, this_leaf);
-		if (unlikely(retval < 0)) {
+		*retval = cpuid4_cache_lookup(j, this_leaf);
+		if (unlikely(*retval < 0)) {
 			int i;

 			for (i = 0; i < j; i++)
@@ -567,9 +552,21 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 		}
 		cache_shared_cpu_map_setup(cpu, j);
 	}
-	set_cpus_allowed_ptr(current, &oldmask);
+}
+
+static int __cpuinit detect_cache_attributes(unsigned int cpu)
+{
+	int			retval;
+
+	if (num_cache_leaves == 0)
+		return -ENOENT;
+
+	per_cpu(cpuid4_info, cpu) = kzalloc(
+	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
+	if (per_cpu(cpuid4_info, cpu) == NULL)
+		return -ENOMEM;

-out:
+	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
 	if (retval) {
 		kfree(per_cpu(cpuid4_info, cpu));
 		per_cpu(cpuid4_info, cpu) = NULL;

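/*
 * Context sketch, not part of the commit above: detect_cache_attributes() now
 * runs its CPUID probing through smp_call_function_single() instead of
 * temporarily rebinding the current task with set_cpus_allowed_ptr(). The
 * callback takes a single void * cookie and reports its result through it.
 * A userspace model of that calling convention; all names are illustrative.
 */
#include <stdio.h>

static void get_cpu_leaves_model(void *_retval)
{
	int *retval = _retval;

	/* stand-in for the per-CPU CPUID walk; report success */
	*retval = 0;
}

/* stand-in for smp_call_function_single(cpu, fn, info, wait) */
static void model_call_on_cpu(int cpu, void (*fn)(void *), void *info)
{
	(void)cpu;		/* the real helper would IPI this CPU and wait */
	fn(info);
}

int main(void)
{
	int retval = -1;

	model_call_on_cpu(1, get_cpu_leaves_model, &retval);
	printf("retval = %d\n", retval);
	return 0;
}
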
@@ -83,34 +83,41 @@ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
  * CPU Initialization
  */

+struct thresh_restart {
+	struct threshold_block *b;
+	int reset;
+	u16 old_limit;
+};
+
 /* must be called with correct cpu affinity */
-static void threshold_restart_bank(struct threshold_block *b,
-				   int reset, u16 old_limit)
+static long threshold_restart_bank(void *_tr)
 {
+	struct thresh_restart *tr = _tr;
 	u32 mci_misc_hi, mci_misc_lo;

-	rdmsr(b->address, mci_misc_lo, mci_misc_hi);
+	rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi);

-	if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
-		reset = 1;	/* limit cannot be lower than err count */
+	if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
+		tr->reset = 1;	/* limit cannot be lower than err count */

-	if (reset) {		/* reset err count and overflow bit */
+	if (tr->reset) {	/* reset err count and overflow bit */
 		mci_misc_hi =
 		    (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
-		    (THRESHOLD_MAX - b->threshold_limit);
-	} else if (old_limit) {	/* change limit w/o reset */
+		    (THRESHOLD_MAX - tr->b->threshold_limit);
+	} else if (tr->old_limit) {	/* change limit w/o reset */
 		int new_count = (mci_misc_hi & THRESHOLD_MAX) +
-		    (old_limit - b->threshold_limit);
+		    (tr->old_limit - tr->b->threshold_limit);
 		mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
 		    (new_count & THRESHOLD_MAX);
 	}

-	b->interrupt_enable ?
+	tr->b->interrupt_enable ?
 	    (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
 	    (mci_misc_hi &= ~MASK_INT_TYPE_HI);

 	mci_misc_hi |= MASK_COUNT_EN_HI;
-	wrmsr(b->address, mci_misc_lo, mci_misc_hi);
+	wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
+	return 0;
 }

 /* cpu init entry point, called from mce.c with preempt off */
@@ -120,6 +127,7 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
 	unsigned int cpu = smp_processor_id();
 	u8 lvt_off;
 	u32 low = 0, high = 0, address = 0;
+	struct thresh_restart tr;

 	for (bank = 0; bank < NR_BANKS; ++bank) {
 		for (block = 0; block < NR_BLOCKS; ++block) {
@@ -162,7 +170,10 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
 			wrmsr(address, low, high);

 			threshold_defaults.address = address;
-			threshold_restart_bank(&threshold_defaults, 0, 0);
+			tr.b = &threshold_defaults;
+			tr.reset = 0;
+			tr.old_limit = 0;
+			threshold_restart_bank(&tr);
 		}
 	}
 }
@@ -251,20 +262,6 @@ struct threshold_attr {
 	ssize_t(*store) (struct threshold_block *, const char *, size_t count);
 };

-static void affinity_set(unsigned int cpu, cpumask_t *oldmask,
-					   cpumask_t *newmask)
-{
-	*oldmask = current->cpus_allowed;
-	cpus_clear(*newmask);
-	cpu_set(cpu, *newmask);
-	set_cpus_allowed_ptr(current, newmask);
-}
-
-static void affinity_restore(const cpumask_t *oldmask)
-{
-	set_cpus_allowed_ptr(current, oldmask);
-}
-
 #define SHOW_FIELDS(name)                                           \
 static ssize_t show_ ## name(struct threshold_block * b, char *buf) \
 {                                                                   \
@@ -277,15 +274,16 @@ static ssize_t store_interrupt_enable(struct threshold_block *b,
 				      const char *buf, size_t count)
 {
 	char *end;
-	cpumask_t oldmask, newmask;
+	struct thresh_restart tr;
 	unsigned long new = simple_strtoul(buf, &end, 0);
 	if (end == buf)
 		return -EINVAL;

 	b->interrupt_enable = !!new;

-	affinity_set(b->cpu, &oldmask, &newmask);
-	threshold_restart_bank(b, 0, 0);
-	affinity_restore(&oldmask);
+	tr.b = b;
+	tr.reset = 0;
+	tr.old_limit = 0;
+	work_on_cpu(b->cpu, threshold_restart_bank, &tr);

 	return end - buf;
 }
@@ -294,8 +292,7 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
 				     const char *buf, size_t count)
 {
 	char *end;
-	cpumask_t oldmask, newmask;
-	u16 old;
+	struct thresh_restart tr;
 	unsigned long new = simple_strtoul(buf, &end, 0);
 	if (end == buf)
 		return -EINVAL;
@@ -303,34 +300,36 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
 		new = THRESHOLD_MAX;
 	if (new < 1)
 		new = 1;
-	old = b->threshold_limit;
+	tr.old_limit = b->threshold_limit;
 	b->threshold_limit = new;
+	tr.b = b;
+	tr.reset = 0;

-	affinity_set(b->cpu, &oldmask, &newmask);
-	threshold_restart_bank(b, 0, old);
-	affinity_restore(&oldmask);
+	work_on_cpu(b->cpu, threshold_restart_bank, &tr);

 	return end - buf;
 }

-static ssize_t show_error_count(struct threshold_block *b, char *buf)
+static long local_error_count(void *_b)
 {
-	u32 high, low;
-	cpumask_t oldmask, newmask;
-	affinity_set(b->cpu, &oldmask, &newmask);
+	struct threshold_block *b = _b;
+	u32 low, high;
+
 	rdmsr(b->address, low, high);
-	affinity_restore(&oldmask);
-	return sprintf(buf, "%x\n",
-		       (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
+	return (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
+}
+
+static ssize_t show_error_count(struct threshold_block *b, char *buf)
+{
+	return sprintf(buf, "%lx\n", work_on_cpu(b->cpu, local_error_count, b));
 }

 static ssize_t store_error_count(struct threshold_block *b,
 				 const char *buf, size_t count)
 {
-	cpumask_t oldmask, newmask;
-	affinity_set(b->cpu, &oldmask, &newmask);
-	threshold_restart_bank(b, 1, 0);
-	affinity_restore(&oldmask);
+	struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };
+
+	work_on_cpu(b->cpu, threshold_restart_bank, &tr);
 	return 1;
 }
@@ -463,12 +462,19 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
 	return err;
 }

+static long local_allocate_threshold_blocks(void *_bank)
+{
+	unsigned int *bank = _bank;
+
+	return allocate_threshold_blocks(smp_processor_id(), *bank, 0,
+					 MSR_IA32_MC0_MISC + *bank * 4);
+}
+
 /* symlinks sibling shared banks to first core.  first core owns dir/files. */
 static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 {
 	int i, err = 0;
 	struct threshold_bank *b = NULL;
-	cpumask_t oldmask, newmask;
 	char name[32];

 	sprintf(name, "threshold_bank%i", bank);
@@ -519,11 +525,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	per_cpu(threshold_banks, cpu)[bank] = b;

-	affinity_set(cpu, &oldmask, &newmask);
-	err = allocate_threshold_blocks(cpu, bank, 0,
-					MSR_IA32_MC0_MISC + bank * 4);
-	affinity_restore(&oldmask);
+	err = work_on_cpu(cpu, local_allocate_threshold_blocks, &bank);

 	if (err)
 		goto out_free;

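/*
 * Context sketch, not part of the commit above: threshold_restart_bank() loses
 * its extra parameters and instead receives everything through a single
 * struct thresh_restart pointer, which is the shape work_on_cpu() expects from
 * its callback. A userspace model of packing the arguments into one struct;
 * the names and fields below are illustrative only.
 */
#include <stdio.h>

struct model_thresh_restart {
	int bank;
	int reset;
	unsigned short old_limit;
};

static long model_restart_bank(void *_tr)
{
	struct model_thresh_restart *tr = _tr;

	printf("bank=%d reset=%d old_limit=%u\n",
	       tr->bank, tr->reset, tr->old_limit);
	return 0;
}

/* stand-in for work_on_cpu(cpu, fn, arg): run fn(arg) "on" the given CPU */
static long model_work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
	(void)cpu;		/* the real helper queues work on that CPU and waits */
	return fn(arg);
}

int main(void)
{
	struct model_thresh_restart tr = { .bank = 4, .reset = 1, .old_limit = 0 };

	return (int)model_work_on_cpu(2, model_restart_bank, &tr);
}
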
@@ -77,10 +77,7 @@ static int crash_nmi_callback(struct notifier_block *self,
 static void smp_send_nmi_allbutself(void)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(safe_smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, NMI_VECTOR);
+	send_IPI_allbutself(NMI_VECTOR);
 }

 static struct notifier_block crash_nmi_nb = {

...@@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) ...@@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return 1; return 1;
} }
static cpumask_t flat_target_cpus(void) static const struct cpumask *flat_target_cpus(void)
{ {
return cpu_online_map; return cpu_online_mask;
} }
static cpumask_t flat_vector_allocation_domain(int cpu) static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{ {
/* Careful. Some cpus do not strictly honor the set of cpus /* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest * specified in the interrupt destination when using lowest
...@@ -45,8 +45,8 @@ static cpumask_t flat_vector_allocation_domain(int cpu) ...@@ -45,8 +45,8 @@ static cpumask_t flat_vector_allocation_domain(int cpu)
* deliver interrupts to the wrong hyperthread when only one * deliver interrupts to the wrong hyperthread when only one
* hyperthread was specified in the interrupt desitination. * hyperthread was specified in the interrupt desitination.
*/ */
cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; cpumask_clear(retmask);
return domain; cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
} }
/* /*
...@@ -69,9 +69,8 @@ static void flat_init_apic_ldr(void) ...@@ -69,9 +69,8 @@ static void flat_init_apic_ldr(void)
apic_write(APIC_LDR, val); apic_write(APIC_LDR, val);
} }
static void flat_send_IPI_mask(cpumask_t cpumask, int vector) static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
{ {
unsigned long mask = cpus_addr(cpumask)[0];
unsigned long flags; unsigned long flags;
local_irq_save(flags); local_irq_save(flags);
...@@ -79,20 +78,41 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector) ...@@ -79,20 +78,41 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
local_irq_restore(flags); local_irq_restore(flags);
} }
static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
unsigned long mask = cpumask_bits(cpumask)[0];
_flat_send_IPI_mask(mask, vector);
}
static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
int vector)
{
unsigned long mask = cpumask_bits(cpumask)[0];
int cpu = smp_processor_id();
if (cpu < BITS_PER_LONG)
clear_bit(cpu, &mask);
_flat_send_IPI_mask(mask, vector);
}
static void flat_send_IPI_allbutself(int vector) static void flat_send_IPI_allbutself(int vector)
{ {
int cpu = smp_processor_id();
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
int hotplug = 1; int hotplug = 1;
#else #else
int hotplug = 0; int hotplug = 0;
#endif #endif
if (hotplug || vector == NMI_VECTOR) { if (hotplug || vector == NMI_VECTOR) {
cpumask_t allbutme = cpu_online_map; if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
unsigned long mask = cpumask_bits(cpu_online_mask)[0];
cpu_clear(smp_processor_id(), allbutme); if (cpu < BITS_PER_LONG)
clear_bit(cpu, &mask);
if (!cpus_empty(allbutme)) _flat_send_IPI_mask(mask, vector);
flat_send_IPI_mask(allbutme, vector); }
} else if (num_online_cpus() > 1) { } else if (num_online_cpus() > 1) {
__send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL); __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
} }
...@@ -101,7 +121,7 @@ static void flat_send_IPI_allbutself(int vector) ...@@ -101,7 +121,7 @@ static void flat_send_IPI_allbutself(int vector)
static void flat_send_IPI_all(int vector) static void flat_send_IPI_all(int vector)
{ {
if (vector == NMI_VECTOR) if (vector == NMI_VECTOR)
flat_send_IPI_mask(cpu_online_map, vector); flat_send_IPI_mask(cpu_online_mask, vector);
else else
__send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
} }
...@@ -135,9 +155,18 @@ static int flat_apic_id_registered(void) ...@@ -135,9 +155,18 @@ static int flat_apic_id_registered(void)
return physid_isset(read_xapic_id(), phys_cpu_present_map); return physid_isset(read_xapic_id(), phys_cpu_present_map);
} }
static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
}
static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask)
{ {
return cpus_addr(cpumask)[0] & APIC_ALL_CPUS; unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS;
return mask1 & mask2;
} }
static unsigned int phys_pkg_id(int index_msb) static unsigned int phys_pkg_id(int index_msb)
...@@ -157,8 +186,10 @@ struct genapic apic_flat = { ...@@ -157,8 +186,10 @@ struct genapic apic_flat = {
.send_IPI_all = flat_send_IPI_all, .send_IPI_all = flat_send_IPI_all,
.send_IPI_allbutself = flat_send_IPI_allbutself, .send_IPI_allbutself = flat_send_IPI_allbutself,
.send_IPI_mask = flat_send_IPI_mask, .send_IPI_mask = flat_send_IPI_mask,
.send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
.send_IPI_self = apic_send_IPI_self, .send_IPI_self = apic_send_IPI_self,
.cpu_mask_to_apicid = flat_cpu_mask_to_apicid, .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
.phys_pkg_id = phys_pkg_id, .phys_pkg_id = phys_pkg_id,
.get_apic_id = get_apic_id, .get_apic_id = get_apic_id,
.set_apic_id = set_apic_id, .set_apic_id = set_apic_id,
...@@ -188,35 +219,39 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) ...@@ -188,35 +219,39 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return 0; return 0;
} }
static cpumask_t physflat_target_cpus(void) static const struct cpumask *physflat_target_cpus(void)
{ {
return cpu_online_map; return cpu_online_mask;
} }
static cpumask_t physflat_vector_allocation_domain(int cpu) static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{ {
return cpumask_of_cpu(cpu); cpumask_clear(retmask);
cpumask_set_cpu(cpu, retmask);
} }
static void physflat_send_IPI_mask(cpumask_t cpumask, int vector) static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{ {
send_IPI_mask_sequence(cpumask, vector); send_IPI_mask_sequence(cpumask, vector);
} }
static void physflat_send_IPI_allbutself(int vector) static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
int vector)
{ {
cpumask_t allbutme = cpu_online_map; send_IPI_mask_allbutself(cpumask, vector);
}
cpu_clear(smp_processor_id(), allbutme); static void physflat_send_IPI_allbutself(int vector)
physflat_send_IPI_mask(allbutme, vector); {
send_IPI_mask_allbutself(cpu_online_mask, vector);
} }
static void physflat_send_IPI_all(int vector) static void physflat_send_IPI_all(int vector)
{ {
physflat_send_IPI_mask(cpu_online_map, vector); physflat_send_IPI_mask(cpu_online_mask, vector);
} }
static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
{ {
int cpu; int cpu;
...@@ -224,13 +259,29 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) ...@@ -224,13 +259,29 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
* We're using fixed IRQ delivery, can only return one phys APIC ID. * We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first. * May as well be the first.
*/ */
cpu = first_cpu(cpumask); cpu = cpumask_first(cpumask);
if ((unsigned)cpu < nr_cpu_ids) if ((unsigned)cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu); return per_cpu(x86_cpu_to_apicid, cpu);
else else
return BAD_APICID; return BAD_APICID;
} }
static unsigned int
physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask)
{
int cpu;
/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
cpu = cpumask_any_and(cpumask, andmask);
if (cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
return BAD_APICID;
}
struct genapic apic_physflat = { struct genapic apic_physflat = {
.name = "physical flat", .name = "physical flat",
.acpi_madt_oem_check = physflat_acpi_madt_oem_check, .acpi_madt_oem_check = physflat_acpi_madt_oem_check,
...@@ -243,8 +294,10 @@ struct genapic apic_physflat = { ...@@ -243,8 +294,10 @@ struct genapic apic_physflat = {
.send_IPI_all = physflat_send_IPI_all, .send_IPI_all = physflat_send_IPI_all,
.send_IPI_allbutself = physflat_send_IPI_allbutself, .send_IPI_allbutself = physflat_send_IPI_allbutself,
.send_IPI_mask = physflat_send_IPI_mask, .send_IPI_mask = physflat_send_IPI_mask,
.send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
.send_IPI_self = apic_send_IPI_self, .send_IPI_self = apic_send_IPI_self,
.cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
.phys_pkg_id = phys_pkg_id, .phys_pkg_id = phys_pkg_id,
.get_apic_id = get_apic_id, .get_apic_id = get_apic_id,
.set_apic_id = set_apic_id, .set_apic_id = set_apic_id,
......
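All of the new cpu_mask_to_apicid_and() callbacks added in this series share the same shape; the sketch below is illustrative only (not part of the commit), with example_apicid_of() standing in for the per-cpu lookup each variant actually performs (x86_cpu_to_apicid or x86_cpu_to_logical_apicid).
/*
 * Sketch: intersect the two masks and let the first cpu found supply
 * the destination APIC ID; an empty intersection yields BAD_APICID.
 */
static unsigned int example_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
						   const struct cpumask *andmask)
{
	int cpu;

	/* any cpu present in both masks; >= nr_cpu_ids if the AND is empty */
	cpu = cpumask_any_and(cpumask, andmask);
	if (cpu < nr_cpu_ids)
		return example_apicid_of(cpu);	/* hypothetical helper */
	return BAD_APICID;
}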
...@@ -22,19 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) ...@@ -22,19 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
static cpumask_t x2apic_target_cpus(void) static const struct cpumask *x2apic_target_cpus(void)
{ {
return cpumask_of_cpu(0); return cpumask_of(0);
} }
/* /*
* for now each logical cpu is in its own vector allocation domain. * for now each logical cpu is in its own vector allocation domain.
*/ */
static cpumask_t x2apic_vector_allocation_domain(int cpu) static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{ {
cpumask_t domain = CPU_MASK_NONE; cpumask_clear(retmask);
cpu_set(cpu, domain); cpumask_set_cpu(cpu, retmask);
return domain;
} }
static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
...@@ -56,32 +55,53 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, ...@@ -56,32 +55,53 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
 * at once. We have 16 cpus in a cluster. This will minimize IPI register * at once. We have 16 cpus in a cluster. This will minimize IPI register
* writes. * writes.
*/ */
static void x2apic_send_IPI_mask(cpumask_t mask, int vector) static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{ {
unsigned long flags; unsigned long flags;
unsigned long query_cpu; unsigned long query_cpu;
local_irq_save(flags); local_irq_save(flags);
for_each_cpu_mask(query_cpu, mask) { for_each_cpu(query_cpu, mask)
__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu), __x2apic_send_IPI_dest(
vector, APIC_DEST_LOGICAL); per_cpu(x86_cpu_to_logical_apicid, query_cpu),
} vector, APIC_DEST_LOGICAL);
local_irq_restore(flags); local_irq_restore(flags);
} }
static void x2apic_send_IPI_allbutself(int vector) static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
int vector)
{ {
cpumask_t mask = cpu_online_map; unsigned long flags;
unsigned long query_cpu;
unsigned long this_cpu = smp_processor_id();
cpu_clear(smp_processor_id(), mask); local_irq_save(flags);
for_each_cpu(query_cpu, mask)
if (query_cpu != this_cpu)
__x2apic_send_IPI_dest(
per_cpu(x86_cpu_to_logical_apicid, query_cpu),
vector, APIC_DEST_LOGICAL);
local_irq_restore(flags);
}
static void x2apic_send_IPI_allbutself(int vector)
{
unsigned long flags;
unsigned long query_cpu;
unsigned long this_cpu = smp_processor_id();
if (!cpus_empty(mask)) local_irq_save(flags);
x2apic_send_IPI_mask(mask, vector); for_each_online_cpu(query_cpu)
if (query_cpu != this_cpu)
__x2apic_send_IPI_dest(
per_cpu(x86_cpu_to_logical_apicid, query_cpu),
vector, APIC_DEST_LOGICAL);
local_irq_restore(flags);
} }
static void x2apic_send_IPI_all(int vector) static void x2apic_send_IPI_all(int vector)
{ {
x2apic_send_IPI_mask(cpu_online_map, vector); x2apic_send_IPI_mask(cpu_online_mask, vector);
} }
static int x2apic_apic_id_registered(void) static int x2apic_apic_id_registered(void)
...@@ -89,7 +109,7 @@ static int x2apic_apic_id_registered(void) ...@@ -89,7 +109,7 @@ static int x2apic_apic_id_registered(void)
return 1; return 1;
} }
static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{ {
int cpu; int cpu;
...@@ -97,13 +117,28 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) ...@@ -97,13 +117,28 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
* We're using fixed IRQ delivery, can only return one phys APIC ID. * We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first. * May as well be the first.
*/ */
cpu = first_cpu(cpumask); cpu = cpumask_first(cpumask);
if ((unsigned)cpu < NR_CPUS) if ((unsigned)cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_logical_apicid, cpu); return per_cpu(x86_cpu_to_logical_apicid, cpu);
else else
return BAD_APICID; return BAD_APICID;
} }
static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask)
{
int cpu;
/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
cpu = cpumask_any_and(cpumask, andmask);
if (cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_logical_apicid, cpu);
return BAD_APICID;
}
static unsigned int get_apic_id(unsigned long x) static unsigned int get_apic_id(unsigned long x)
{ {
unsigned int id; unsigned int id;
...@@ -150,8 +185,10 @@ struct genapic apic_x2apic_cluster = { ...@@ -150,8 +185,10 @@ struct genapic apic_x2apic_cluster = {
.send_IPI_all = x2apic_send_IPI_all, .send_IPI_all = x2apic_send_IPI_all,
.send_IPI_allbutself = x2apic_send_IPI_allbutself, .send_IPI_allbutself = x2apic_send_IPI_allbutself,
.send_IPI_mask = x2apic_send_IPI_mask, .send_IPI_mask = x2apic_send_IPI_mask,
.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
.send_IPI_self = x2apic_send_IPI_self, .send_IPI_self = x2apic_send_IPI_self,
.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
.phys_pkg_id = phys_pkg_id, .phys_pkg_id = phys_pkg_id,
.get_apic_id = get_apic_id, .get_apic_id = get_apic_id,
.set_apic_id = set_apic_id, .set_apic_id = set_apic_id,
......
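The IPI senders above now take a const struct cpumask pointer instead of a cpumask_t by value; with MAXSMP (NR_CPUS=4096) a cpumask_t is 512 bytes, so the old copy-then-clear-self pattern pushed half a kilobyte onto the stack per IPI. A minimal sketch of the pointer-based "all but self" loop, with a hypothetical per-cpu sender:
/* Sketch only: send to every cpu in *mask except the current one,
 * without copying the mask onto the stack first. */
static void example_send_IPI_mask_allbutself(const struct cpumask *mask,
					     int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		if (cpu != this_cpu)
			example_send_IPI_one(cpu, vector);	/* hypothetical */
}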
...@@ -29,16 +29,15 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) ...@@ -29,16 +29,15 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
static cpumask_t x2apic_target_cpus(void) static const struct cpumask *x2apic_target_cpus(void)
{ {
return cpumask_of_cpu(0); return cpumask_of(0);
} }
static cpumask_t x2apic_vector_allocation_domain(int cpu) static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{ {
cpumask_t domain = CPU_MASK_NONE; cpumask_clear(retmask);
cpu_set(cpu, domain); cpumask_set_cpu(cpu, retmask);
return domain;
} }
static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
...@@ -54,32 +53,54 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, ...@@ -54,32 +53,54 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
x2apic_icr_write(cfg, apicid); x2apic_icr_write(cfg, apicid);
} }
static void x2apic_send_IPI_mask(cpumask_t mask, int vector) static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{ {
unsigned long flags; unsigned long flags;
unsigned long query_cpu; unsigned long query_cpu;
local_irq_save(flags); local_irq_save(flags);
for_each_cpu_mask(query_cpu, mask) { for_each_cpu(query_cpu, mask) {
__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
vector, APIC_DEST_PHYSICAL); vector, APIC_DEST_PHYSICAL);
} }
local_irq_restore(flags); local_irq_restore(flags);
} }
static void x2apic_send_IPI_allbutself(int vector) static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
int vector)
{ {
cpumask_t mask = cpu_online_map; unsigned long flags;
unsigned long query_cpu;
unsigned long this_cpu = smp_processor_id();
local_irq_save(flags);
for_each_cpu(query_cpu, mask) {
if (query_cpu != this_cpu)
__x2apic_send_IPI_dest(
per_cpu(x86_cpu_to_apicid, query_cpu),
vector, APIC_DEST_PHYSICAL);
}
local_irq_restore(flags);
}
cpu_clear(smp_processor_id(), mask); static void x2apic_send_IPI_allbutself(int vector)
{
unsigned long flags;
unsigned long query_cpu;
unsigned long this_cpu = smp_processor_id();
if (!cpus_empty(mask)) local_irq_save(flags);
x2apic_send_IPI_mask(mask, vector); for_each_online_cpu(query_cpu)
if (query_cpu != this_cpu)
__x2apic_send_IPI_dest(
per_cpu(x86_cpu_to_apicid, query_cpu),
vector, APIC_DEST_PHYSICAL);
local_irq_restore(flags);
} }
static void x2apic_send_IPI_all(int vector) static void x2apic_send_IPI_all(int vector)
{ {
x2apic_send_IPI_mask(cpu_online_map, vector); x2apic_send_IPI_mask(cpu_online_mask, vector);
} }
static int x2apic_apic_id_registered(void) static int x2apic_apic_id_registered(void)
...@@ -87,7 +108,7 @@ static int x2apic_apic_id_registered(void) ...@@ -87,7 +108,7 @@ static int x2apic_apic_id_registered(void)
return 1; return 1;
} }
static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{ {
int cpu; int cpu;
...@@ -95,13 +116,28 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) ...@@ -95,13 +116,28 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
* We're using fixed IRQ delivery, can only return one phys APIC ID. * We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first. * May as well be the first.
*/ */
cpu = first_cpu(cpumask); cpu = cpumask_first(cpumask);
if ((unsigned)cpu < NR_CPUS) if ((unsigned)cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu); return per_cpu(x86_cpu_to_apicid, cpu);
else else
return BAD_APICID; return BAD_APICID;
} }
static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask)
{
int cpu;
/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
cpu = cpumask_any_and(cpumask, andmask);
if (cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
return BAD_APICID;
}
static unsigned int get_apic_id(unsigned long x) static unsigned int get_apic_id(unsigned long x)
{ {
unsigned int id; unsigned int id;
...@@ -145,8 +181,10 @@ struct genapic apic_x2apic_phys = { ...@@ -145,8 +181,10 @@ struct genapic apic_x2apic_phys = {
.send_IPI_all = x2apic_send_IPI_all, .send_IPI_all = x2apic_send_IPI_all,
.send_IPI_allbutself = x2apic_send_IPI_allbutself, .send_IPI_allbutself = x2apic_send_IPI_allbutself,
.send_IPI_mask = x2apic_send_IPI_mask, .send_IPI_mask = x2apic_send_IPI_mask,
.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
.send_IPI_self = x2apic_send_IPI_self, .send_IPI_self = x2apic_send_IPI_self,
.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
.phys_pkg_id = phys_pkg_id, .phys_pkg_id = phys_pkg_id,
.get_apic_id = get_apic_id, .get_apic_id = get_apic_id,
.set_apic_id = set_apic_id, .set_apic_id = set_apic_id,
......
...@@ -75,16 +75,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second); ...@@ -75,16 +75,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
static cpumask_t uv_target_cpus(void) static const struct cpumask *uv_target_cpus(void)
{ {
return cpumask_of_cpu(0); return cpumask_of(0);
} }
static cpumask_t uv_vector_allocation_domain(int cpu) static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
{ {
cpumask_t domain = CPU_MASK_NONE; cpumask_clear(retmask);
cpu_set(cpu, domain); cpumask_set_cpu(cpu, retmask);
return domain;
} }
int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
...@@ -123,28 +122,37 @@ static void uv_send_IPI_one(int cpu, int vector) ...@@ -123,28 +122,37 @@ static void uv_send_IPI_one(int cpu, int vector)
uv_write_global_mmr64(pnode, UVH_IPI_INT, val); uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
} }
static void uv_send_IPI_mask(cpumask_t mask, int vector) static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{ {
unsigned int cpu; unsigned int cpu;
for_each_possible_cpu(cpu) for_each_cpu(cpu, mask)
if (cpu_isset(cpu, mask)) uv_send_IPI_one(cpu, vector);
}
static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
unsigned int cpu;
unsigned int this_cpu = smp_processor_id();
for_each_cpu(cpu, mask)
if (cpu != this_cpu)
uv_send_IPI_one(cpu, vector); uv_send_IPI_one(cpu, vector);
} }
static void uv_send_IPI_allbutself(int vector) static void uv_send_IPI_allbutself(int vector)
{ {
cpumask_t mask = cpu_online_map; unsigned int cpu;
unsigned int this_cpu = smp_processor_id();
cpu_clear(smp_processor_id(), mask);
if (!cpus_empty(mask)) for_each_online_cpu(cpu)
uv_send_IPI_mask(mask, vector); if (cpu != this_cpu)
uv_send_IPI_one(cpu, vector);
} }
static void uv_send_IPI_all(int vector) static void uv_send_IPI_all(int vector)
{ {
uv_send_IPI_mask(cpu_online_map, vector); uv_send_IPI_mask(cpu_online_mask, vector);
} }
static int uv_apic_id_registered(void) static int uv_apic_id_registered(void)
...@@ -156,7 +164,7 @@ static void uv_init_apic_ldr(void) ...@@ -156,7 +164,7 @@ static void uv_init_apic_ldr(void)
{ {
} }
static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{ {
int cpu; int cpu;
...@@ -164,13 +172,28 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) ...@@ -164,13 +172,28 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
* We're using fixed IRQ delivery, can only return one phys APIC ID. * We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first. * May as well be the first.
*/ */
cpu = first_cpu(cpumask); cpu = cpumask_first(cpumask);
if ((unsigned)cpu < nr_cpu_ids) if ((unsigned)cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu); return per_cpu(x86_cpu_to_apicid, cpu);
else else
return BAD_APICID; return BAD_APICID;
} }
static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
const struct cpumask *andmask)
{
int cpu;
/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
* May as well be the first.
*/
cpu = cpumask_any_and(cpumask, andmask);
if (cpu < nr_cpu_ids)
return per_cpu(x86_cpu_to_apicid, cpu);
return BAD_APICID;
}
static unsigned int get_apic_id(unsigned long x) static unsigned int get_apic_id(unsigned long x)
{ {
unsigned int id; unsigned int id;
...@@ -218,8 +241,10 @@ struct genapic apic_x2apic_uv_x = { ...@@ -218,8 +241,10 @@ struct genapic apic_x2apic_uv_x = {
.send_IPI_all = uv_send_IPI_all, .send_IPI_all = uv_send_IPI_all,
.send_IPI_allbutself = uv_send_IPI_allbutself, .send_IPI_allbutself = uv_send_IPI_allbutself,
.send_IPI_mask = uv_send_IPI_mask, .send_IPI_mask = uv_send_IPI_mask,
.send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
.send_IPI_self = uv_send_IPI_self, .send_IPI_self = uv_send_IPI_self,
.cpu_mask_to_apicid = uv_cpu_mask_to_apicid, .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
.phys_pkg_id = phys_pkg_id, .phys_pkg_id = phys_pkg_id,
.get_apic_id = get_apic_id, .get_apic_id = get_apic_id,
.set_apic_id = set_apic_id, .set_apic_id = set_apic_id,
......
...@@ -116,18 +116,18 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector) ...@@ -116,18 +116,18 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector)
/* /*
* This is only used on smaller machines. * This is only used on smaller machines.
*/ */
void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
{ {
unsigned long mask = cpus_addr(cpumask)[0]; unsigned long mask = cpumask_bits(cpumask)[0];
unsigned long flags; unsigned long flags;
local_irq_save(flags); local_irq_save(flags);
WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
__send_IPI_dest_field(mask, vector); __send_IPI_dest_field(mask, vector);
local_irq_restore(flags); local_irq_restore(flags);
} }
void send_IPI_mask_sequence(cpumask_t mask, int vector) void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
{ {
unsigned long flags; unsigned long flags;
unsigned int query_cpu; unsigned int query_cpu;
...@@ -139,12 +139,24 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector) ...@@ -139,12 +139,24 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector)
*/ */
local_irq_save(flags); local_irq_save(flags);
for_each_possible_cpu(query_cpu) { for_each_cpu(query_cpu, mask)
if (cpu_isset(query_cpu, mask)) { __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
local_irq_restore(flags);
}
void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
unsigned long flags;
unsigned int query_cpu;
unsigned int this_cpu = smp_processor_id();
/* See Hack comment above */
local_irq_save(flags);
for_each_cpu(query_cpu, mask)
if (query_cpu != this_cpu)
__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
vector); vector);
}
}
local_irq_restore(flags); local_irq_restore(flags);
} }
......
...@@ -233,27 +233,28 @@ unsigned int do_IRQ(struct pt_regs *regs) ...@@ -233,27 +233,28 @@ unsigned int do_IRQ(struct pt_regs *regs)
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h> #include <mach_apic.h>
void fixup_irqs(cpumask_t map) /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{ {
unsigned int irq; unsigned int irq;
static int warned; static int warned;
struct irq_desc *desc; struct irq_desc *desc;
for_each_irq_desc(irq, desc) { for_each_irq_desc(irq, desc) {
cpumask_t mask; const struct cpumask *affinity;
if (!desc) if (!desc)
continue; continue;
if (irq == 2) if (irq == 2)
continue; continue;
cpus_and(mask, desc->affinity, map); affinity = &desc->affinity;
if (any_online_cpu(mask) == NR_CPUS) { if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
printk("Breaking affinity for irq %i\n", irq); printk("Breaking affinity for irq %i\n", irq);
mask = map; affinity = cpu_all_mask;
} }
if (desc->chip->set_affinity) if (desc->chip->set_affinity)
desc->chip->set_affinity(irq, &mask); desc->chip->set_affinity(irq, affinity);
else if (desc->action && !(warned++)) else if (desc->action && !(warned++))
printk("Cannot set affinity for irq %i\n", irq); printk("Cannot set affinity for irq %i\n", irq);
} }
......
...@@ -83,16 +83,17 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs) ...@@ -83,16 +83,17 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map) /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{ {
unsigned int irq; unsigned int irq;
static int warned; static int warned;
struct irq_desc *desc; struct irq_desc *desc;
for_each_irq_desc(irq, desc) { for_each_irq_desc(irq, desc) {
cpumask_t mask;
int break_affinity = 0; int break_affinity = 0;
int set_affinity = 1; int set_affinity = 1;
const struct cpumask *affinity;
if (!desc) if (!desc)
continue; continue;
...@@ -102,23 +103,23 @@ void fixup_irqs(cpumask_t map) ...@@ -102,23 +103,23 @@ void fixup_irqs(cpumask_t map)
/* interrupts are disabled at this point */ /* interrupts are disabled at this point */
spin_lock(&desc->lock); spin_lock(&desc->lock);
affinity = &desc->affinity;
if (!irq_has_action(irq) || if (!irq_has_action(irq) ||
cpus_equal(desc->affinity, map)) { cpumask_equal(affinity, cpu_online_mask)) {
spin_unlock(&desc->lock); spin_unlock(&desc->lock);
continue; continue;
} }
cpus_and(mask, desc->affinity, map); if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
if (cpus_empty(mask)) {
break_affinity = 1; break_affinity = 1;
mask = map; affinity = cpu_all_mask;
} }
if (desc->chip->mask) if (desc->chip->mask)
desc->chip->mask(irq); desc->chip->mask(irq);
if (desc->chip->set_affinity) if (desc->chip->set_affinity)
desc->chip->set_affinity(irq, &mask); desc->chip->set_affinity(irq, affinity);
else if (!(warned++)) else if (!(warned++))
set_affinity = 0; set_affinity = 0;
......
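Both fixup_irqs() variants above use the same emptiness test on an irq's affinity mask. The helper below is a sketch (not in the commit) of that idiom: cpumask_any_and() returns a value >= nr_cpu_ids only when the two masks share no cpu, so the old cpus_and()/cpus_empty() pair and its temporary mask are not needed.
/* Sketch: does "affinity" still contain at least one online cpu? */
static bool example_affinity_has_online_cpu(const struct cpumask *affinity)
{
	return cpumask_any_and(affinity, cpu_online_mask) < nr_cpu_ids;
}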
...@@ -152,6 +152,11 @@ void __init setup_per_cpu_areas(void) ...@@ -152,6 +152,11 @@ void __init setup_per_cpu_areas(void)
old_size = PERCPU_ENOUGH_ROOM; old_size = PERCPU_ENOUGH_ROOM;
align = max_t(unsigned long, PAGE_SIZE, align); align = max_t(unsigned long, PAGE_SIZE, align);
size = roundup(old_size, align); size = roundup(old_size, align);
printk(KERN_INFO
"NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
size); size);
...@@ -168,24 +173,24 @@ void __init setup_per_cpu_areas(void) ...@@ -168,24 +173,24 @@ void __init setup_per_cpu_areas(void)
"cpu %d has no node %d or node-local memory\n", "cpu %d has no node %d or node-local memory\n",
cpu, node); cpu, node);
if (ptr) if (ptr)
printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n", printk(KERN_DEBUG
"per cpu data for cpu%d at %016lx\n",
cpu, __pa(ptr)); cpu, __pa(ptr));
} }
else { else {
ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
__pa(MAX_DMA_ADDRESS)); __pa(MAX_DMA_ADDRESS));
if (ptr) if (ptr)
printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n", printk(KERN_DEBUG
cpu, node, __pa(ptr)); "per cpu data for cpu%d on node%d "
"at %016lx\n",
cpu, node, __pa(ptr));
} }
#endif #endif
per_cpu_offset(cpu) = ptr - __per_cpu_start; per_cpu_offset(cpu) = ptr - __per_cpu_start;
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
} }
printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
NR_CPUS, nr_cpu_ids, nr_node_ids);
/* Setup percpu data maps */ /* Setup percpu data maps */
setup_per_cpu_maps(); setup_per_cpu_maps();
......
...@@ -118,22 +118,22 @@ static void native_smp_send_reschedule(int cpu) ...@@ -118,22 +118,22 @@ static void native_smp_send_reschedule(int cpu)
WARN_ON(1); WARN_ON(1);
return; return;
} }
send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
} }
void native_send_call_func_single_ipi(int cpu) void native_send_call_func_single_ipi(int cpu)
{ {
send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
} }
void native_send_call_func_ipi(cpumask_t mask) void native_send_call_func_ipi(const struct cpumask *mask)
{ {
cpumask_t allbutself; cpumask_t allbutself;
allbutself = cpu_online_map; allbutself = cpu_online_map;
cpu_clear(smp_processor_id(), allbutself); cpu_clear(smp_processor_id(), allbutself);
if (cpus_equal(mask, allbutself) && if (cpus_equal(*mask, allbutself) &&
cpus_equal(cpu_online_map, cpu_callout_map)) cpus_equal(cpu_online_map, cpu_callout_map))
send_IPI_allbutself(CALL_FUNCTION_VECTOR); send_IPI_allbutself(CALL_FUNCTION_VECTOR);
else else
......
...@@ -1344,7 +1344,7 @@ void cpu_disable_common(void) ...@@ -1344,7 +1344,7 @@ void cpu_disable_common(void)
lock_vector_lock(); lock_vector_lock();
remove_cpu_from_maps(cpu); remove_cpu_from_maps(cpu);
unlock_vector_lock(); unlock_vector_lock();
fixup_irqs(cpu_online_map); fixup_irqs();
} }
int native_cpu_disable(void) int native_cpu_disable(void)
......
...@@ -164,7 +164,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, ...@@ -164,7 +164,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
* We have to send the IPI only to * We have to send the IPI only to
* CPUs affected. * CPUs affected.
*/ */
send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR); send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);
while (!cpus_empty(flush_cpumask)) while (!cpus_empty(flush_cpumask))
/* nothing. lockup detection does not belong here */ /* nothing. lockup detection does not belong here */
......
...@@ -191,7 +191,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, ...@@ -191,7 +191,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
* We have to send the IPI only to * We have to send the IPI only to
* CPUs affected. * CPUs affected.
*/ */
send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender); send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);
while (!cpus_empty(f->flush_cpumask)) while (!cpus_empty(f->flush_cpumask))
cpu_relax(); cpu_relax();
......
...@@ -42,9 +42,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = { ...@@ -42,9 +42,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
{ } { }
}; };
static cpumask_t vector_allocation_domain(int cpu) static void vector_allocation_domain(int cpu, cpumask_t *retmask)
{ {
return cpumask_of_cpu(cpu); cpus_clear(*retmask);
cpu_set(cpu, *retmask);
} }
static int probe_bigsmp(void) static int probe_bigsmp(void)
......
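The 32-bit subarch vector_allocation_domain() hooks above and below now fill a caller-provided mask instead of returning a cpumask_t by value. A hedged sketch of how a caller might use the reworked hook, with an allocated scratch mask so nothing large lands on the stack:
/* Sketch only: obtain a cpu's vector allocation domain via the new
 * fill-the-mask interface; real callers manage their own scratch mask. */
static void example_use_allocation_domain(int cpu)
{
	cpumask_var_t domain;

	if (!alloc_cpumask_var(&domain, GFP_KERNEL))
		return;
	vector_allocation_domain(cpu, domain);
	/* ... scan "domain" for a free vector here ... */
	free_cpumask_var(domain);
}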
...@@ -87,7 +87,7 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) ...@@ -87,7 +87,7 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
} }
#endif #endif
static cpumask_t vector_allocation_domain(int cpu) static void vector_allocation_domain(int cpu, cpumask_t *retmask)
{ {
/* Careful. Some cpus do not strictly honor the set of cpus /* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest * specified in the interrupt destination when using lowest
...@@ -97,8 +97,7 @@ static cpumask_t vector_allocation_domain(int cpu) ...@@ -97,8 +97,7 @@ static cpumask_t vector_allocation_domain(int cpu)
* deliver interrupts to the wrong hyperthread when only one * deliver interrupts to the wrong hyperthread when only one
 * hyperthread was specified in the interrupt destination. * hyperthread was specified in the interrupt destination.
*/ */
cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
return domain;
} }
struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000); struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
...@@ -38,7 +38,7 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id) ...@@ -38,7 +38,7 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return 0; return 0;
} }
static cpumask_t vector_allocation_domain(int cpu) static void vector_allocation_domain(int cpu, cpumask_t *retmask)
{ {
/* Careful. Some cpus do not strictly honor the set of cpus /* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest * specified in the interrupt destination when using lowest
...@@ -48,8 +48,7 @@ static cpumask_t vector_allocation_domain(int cpu) ...@@ -48,8 +48,7 @@ static cpumask_t vector_allocation_domain(int cpu)
* deliver interrupts to the wrong hyperthread when only one * deliver interrupts to the wrong hyperthread when only one
 * hyperthread was specified in the interrupt destination. * hyperthread was specified in the interrupt destination.
*/ */
cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
return domain;
} }
struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq); struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
...@@ -24,7 +24,7 @@ static int probe_summit(void) ...@@ -24,7 +24,7 @@ static int probe_summit(void)
return 0; return 0;
} }
static cpumask_t vector_allocation_domain(int cpu) static void vector_allocation_domain(int cpu, cpumask_t *retmask)
{ {
/* Careful. Some cpus do not strictly honor the set of cpus /* Careful. Some cpus do not strictly honor the set of cpus
* specified in the interrupt destination when using lowest * specified in the interrupt destination when using lowest
...@@ -34,8 +34,7 @@ static cpumask_t vector_allocation_domain(int cpu) ...@@ -34,8 +34,7 @@ static cpumask_t vector_allocation_domain(int cpu)
* deliver interrupts to the wrong hyperthread when only one * deliver interrupts to the wrong hyperthread when only one
 * hyperthread was specified in the interrupt destination. * hyperthread was specified in the interrupt destination.
*/ */
cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
return domain;
} }
struct genapic apic_summit = APIC_INIT("summit", probe_summit); struct genapic apic_summit = APIC_INIT("summit", probe_summit);
...@@ -672,7 +672,7 @@ void __init smp_boot_cpus(void) ...@@ -672,7 +672,7 @@ void __init smp_boot_cpus(void)
/* loop over all the extended VIC CPUs and boot them. The /* loop over all the extended VIC CPUs and boot them. The
* Quad CPUs must be bootstrapped by their extended VIC cpu */ * Quad CPUs must be bootstrapped by their extended VIC cpu */
for (i = 0; i < NR_CPUS; i++) { for (i = 0; i < nr_cpu_ids; i++) {
if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map)) if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
continue; continue;
do_boot_cpu(i); do_boot_cpu(i);
......
...@@ -278,7 +278,7 @@ void __init numa_init_array(void) ...@@ -278,7 +278,7 @@ void __init numa_init_array(void)
int rr, i; int rr, i;
rr = first_node(node_online_map); rr = first_node(node_online_map);
for (i = 0; i < NR_CPUS; i++) { for (i = 0; i < nr_cpu_ids; i++) {
if (early_cpu_to_node(i) != NUMA_NO_NODE) if (early_cpu_to_node(i) != NUMA_NO_NODE)
continue; continue;
numa_set_node(i, rr); numa_set_node(i, rr);
...@@ -549,7 +549,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn) ...@@ -549,7 +549,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn)
memnodemap[0] = 0; memnodemap[0] = 0;
node_set_online(0); node_set_online(0);
node_set(0, node_possible_map); node_set(0, node_possible_map);
for (i = 0; i < NR_CPUS; i++) for (i = 0; i < nr_cpu_ids; i++)
numa_set_node(i, 0); numa_set_node(i, 0);
e820_register_active_regions(0, start_pfn, last_pfn); e820_register_active_regions(0, start_pfn, last_pfn);
setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT); setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
......
...@@ -382,7 +382,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) ...@@ -382,7 +382,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
if (!node_online(i)) if (!node_online(i))
setup_node_bootmem(i, nodes[i].start, nodes[i].end); setup_node_bootmem(i, nodes[i].start, nodes[i].end);
for (i = 0; i < NR_CPUS; i++) { for (i = 0; i < nr_cpu_ids; i++) {
int node = early_cpu_to_node(i); int node = early_cpu_to_node(i);
if (node == NUMA_NO_NODE) if (node == NUMA_NO_NODE)
......
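Several loops above (voyager_smp.c, numa_64.c, srat_64.c) and in the Xen code below change their bound from NR_CPUS to nr_cpu_ids, the runtime count of possible cpu ids, which is usually far smaller than the compile-time maximum. A trivial sketch of the shape:
/* Sketch: visit only cpu ids that can ever exist on this system. */
static void example_visit_possible_ids(void)
{
	int i;

	for (i = 0; i < nr_cpu_ids; i++)
		example_per_id_setup(i);	/* hypothetical per-id work */
}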
...@@ -1079,7 +1079,7 @@ static void drop_other_mm_ref(void *info) ...@@ -1079,7 +1079,7 @@ static void drop_other_mm_ref(void *info)
static void xen_drop_mm_ref(struct mm_struct *mm) static void xen_drop_mm_ref(struct mm_struct *mm)
{ {
cpumask_t mask; cpumask_var_t mask;
unsigned cpu; unsigned cpu;
if (current->active_mm == mm) { if (current->active_mm == mm) {
...@@ -1091,7 +1091,16 @@ static void xen_drop_mm_ref(struct mm_struct *mm) ...@@ -1091,7 +1091,16 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
} }
/* Get the "official" set of cpus referring to our pagetable. */ /* Get the "official" set of cpus referring to our pagetable. */
mask = mm->cpu_vm_mask; if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
for_each_online_cpu(cpu) {
if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
&& per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
continue;
smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
}
return;
}
cpumask_copy(mask, &mm->cpu_vm_mask);
/* It's possible that a vcpu may have a stale reference to our /* It's possible that a vcpu may have a stale reference to our
cr3, because its in lazy mode, and it hasn't yet flushed cr3, because its in lazy mode, and it hasn't yet flushed
...@@ -1100,11 +1109,12 @@ static void xen_drop_mm_ref(struct mm_struct *mm) ...@@ -1100,11 +1109,12 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
if needed. */ if needed. */
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
cpu_set(cpu, mask); cpumask_set_cpu(cpu, mask);
} }
if (!cpus_empty(mask)) if (!cpumask_empty(mask))
smp_call_function_mask(mask, drop_other_mm_ref, mm, 1); smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
free_cpumask_var(mask);
} }
#else #else
static void xen_drop_mm_ref(struct mm_struct *mm) static void xen_drop_mm_ref(struct mm_struct *mm)
......
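xen_drop_mm_ref() above introduces the allocate-or-fall-back pattern that CONFIG_CPUMASK_OFFSTACK requires: the scratch mask is heap-allocated, and if the atomic allocation fails the work is done per-cpu without one. A generic sketch of that pattern (helper names hypothetical):
/* Sketch only: use an allocated scratch cpumask when available,
 * otherwise fall back to a loop that needs no scratch mask. */
static void example_with_scratch_mask(void)
{
	cpumask_var_t scratch;
	unsigned int cpu;

	if (!alloc_cpumask_var(&scratch, GFP_ATOMIC)) {
		for_each_online_cpu(cpu)
			example_slow_path(cpu);		/* hypothetical */
		return;
	}

	cpumask_copy(scratch, cpu_online_mask);
	/* ... adjust "scratch" as needed ... */
	example_fast_path(scratch);			/* hypothetical */
	free_cpumask_var(scratch);
}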
...@@ -33,7 +33,7 @@ ...@@ -33,7 +33,7 @@
#include "xen-ops.h" #include "xen-ops.h"
#include "mmu.h" #include "mmu.h"
cpumask_t xen_cpu_initialized_map; cpumask_var_t xen_cpu_initialized_map;
static DEFINE_PER_CPU(int, resched_irq); static DEFINE_PER_CPU(int, resched_irq);
static DEFINE_PER_CPU(int, callfunc_irq); static DEFINE_PER_CPU(int, callfunc_irq);
...@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void) ...@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void)
{ {
int i, rc; int i, rc;
for (i = 0; i < NR_CPUS; i++) { for (i = 0; i < nr_cpu_ids; i++) {
rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
if (rc >= 0) { if (rc >= 0) {
num_processors++; num_processors++;
...@@ -192,11 +192,14 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) ...@@ -192,11 +192,14 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
if (xen_smp_intr_init(0)) if (xen_smp_intr_init(0))
BUG(); BUG();
xen_cpu_initialized_map = cpumask_of_cpu(0); if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
panic("could not allocate xen_cpu_initialized_map\n");
cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));
/* Restrict the possible_map according to max_cpus. */ /* Restrict the possible_map according to max_cpus. */
while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--) for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
continue; continue;
cpu_clear(cpu, cpu_possible_map); cpu_clear(cpu, cpu_possible_map);
} }
...@@ -221,7 +224,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) ...@@ -221,7 +224,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
struct vcpu_guest_context *ctxt; struct vcpu_guest_context *ctxt;
struct desc_struct *gdt; struct desc_struct *gdt;
if (cpu_test_and_set(cpu, xen_cpu_initialized_map)) if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
return 0; return 0;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
...@@ -408,24 +411,23 @@ static void xen_smp_send_reschedule(int cpu) ...@@ -408,24 +411,23 @@ static void xen_smp_send_reschedule(int cpu)
xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
} }
static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) static void xen_send_IPI_mask(const struct cpumask *mask,
enum ipi_vector vector)
{ {
unsigned cpu; unsigned cpu;
cpus_and(mask, mask, cpu_online_map); for_each_cpu_and(cpu, mask, cpu_online_mask)
for_each_cpu_mask_nr(cpu, mask)
xen_send_IPI_one(cpu, vector); xen_send_IPI_one(cpu, vector);
} }
static void xen_smp_send_call_function_ipi(cpumask_t mask) static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{ {
int cpu; int cpu;
xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
/* Make sure other vcpus get a chance to run if they need to. */ /* Make sure other vcpus get a chance to run if they need to. */
for_each_cpu_mask_nr(cpu, mask) { for_each_cpu(cpu, mask) {
if (xen_vcpu_stolen(cpu)) { if (xen_vcpu_stolen(cpu)) {
HYPERVISOR_sched_op(SCHEDOP_yield, 0); HYPERVISOR_sched_op(SCHEDOP_yield, 0);
break; break;
...@@ -435,7 +437,8 @@ static void xen_smp_send_call_function_ipi(cpumask_t mask) ...@@ -435,7 +437,8 @@ static void xen_smp_send_call_function_ipi(cpumask_t mask)
static void xen_smp_send_call_function_single_ipi(int cpu) static void xen_smp_send_call_function_single_ipi(int cpu)
{ {
xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR); xen_send_IPI_mask(cpumask_of(cpu),
XEN_CALL_FUNCTION_SINGLE_VECTOR);
} }
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
......
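xen_send_IPI_mask() above uses for_each_cpu_and() to walk the intersection of the requested mask and cpu_online_mask directly, replacing the old cpus_and() into an on-stack temporary. A sketch of the idiom, reusing the file's existing per-cpu sender:
/* Sketch: iterate the online subset of *mask without a temporary cpumask. */
static void example_ipi_online_subset(const struct cpumask *mask,
				      enum ipi_vector vector)
{
	unsigned int cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);	/* per-cpu sender in smp.c */
}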
...@@ -35,7 +35,8 @@ void xen_post_suspend(int suspend_cancelled) ...@@ -35,7 +35,8 @@ void xen_post_suspend(int suspend_cancelled)
pfn_to_mfn(xen_start_info->console.domU.mfn); pfn_to_mfn(xen_start_info->console.domU.mfn);
} else { } else {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
xen_cpu_initialized_map = cpu_online_map; BUG_ON(xen_cpu_initialized_map == NULL);
cpumask_copy(xen_cpu_initialized_map, cpu_online_mask);
#endif #endif
xen_vcpu_restore(); xen_vcpu_restore();
} }
......
...@@ -58,7 +58,7 @@ void __init xen_init_spinlocks(void); ...@@ -58,7 +58,7 @@ void __init xen_init_spinlocks(void);
__cpuinit void xen_init_lock_cpu(int cpu); __cpuinit void xen_init_lock_cpu(int cpu);
void xen_uninit_lock_cpu(int cpu); void xen_uninit_lock_cpu(int cpu);
extern cpumask_t xen_cpu_initialized_map; extern cpumask_var_t xen_cpu_initialized_map;
#else #else
static inline void xen_smp_init(void) {} static inline void xen_smp_init(void) {}
#endif #endif
......