Commit 55acdddb authored by Linus Torvalds

Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull smp/hotplug changes from Ingo Molnar:
 "Various cleanups to the SMP hotplug code - a continuing effort of
  Thomas et al"

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  smpboot: Remove leftover declaration
  smp: Remove num_booting_cpus()
  smp: Remove ipi_call_lock[_irq]()/ipi_call_unlock[_irq]()
  POWERPC: Smp: remove call to ipi_call_lock()/ipi_call_unlock()
  SPARC: SMP: Remove call to ipi_call_lock_irq()/ipi_call_unlock_irq()
  ia64: SMP: Remove call to ipi_call_lock_irq()/ipi_call_unlock_irq()
  x86: SMP: Remove call to ipi_call_lock()/ipi_call_unlock()
  tile: SMP: Remove call to ipi_call_lock()/ipi_call_unlock()
  S390: Smp: remove call to ipi_call_lock()/ipi_call_unlock()
  parisc: Smp: remove call to ipi_call_lock()/ipi_call_unlock()
  mn10300: SMP: Remove call to ipi_call_lock()/ipi_call_unlock()
  hexagon: SMP: Remove call to ipi_call_lock()/ipi_call_unlock()
parents 2eafeb6a b871a42b
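
Nine of the patches below make the same mechanical change: the architecture bringup (and, on sparc, teardown) paths wrapped set_cpu_online() in ipi_call_lock()/ipi_call_unlock() or their _irq variants, and each patch deletes that wrapping. The remaining three remove the now-unused helpers, num_booting_cpus(), and a leftover declaration from the core code. A composite before/after sketch of the common pattern (illustrative only; no single architecture's code looks exactly like this — the real hunks follow):

        /* Before: typical secondary-CPU bringup */
        notify_cpu_starting(cpu);

        ipi_call_lock();                /* took call_function.lock */
        set_cpu_online(cpu, true);
        ipi_call_unlock();

        local_irq_enable();

        /* After: the lock pair is simply dropped */
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);
        local_irq_enable();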
@@ -180,9 +180,7 @@ void __cpuinit start_secondary(void)
 
         notify_cpu_starting(cpu);
 
-        ipi_call_lock();
         set_cpu_online(cpu, true);
-        ipi_call_unlock();
 
         local_irq_enable();
...
@@ -382,7 +382,6 @@ smp_callin (void)
         set_numa_node(cpu_to_node_map[cpuid]);
         set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
 
-        ipi_call_lock_irq();
         spin_lock(&vector_lock);
         /* Setup the per cpu irq handling data structures */
         __setup_vector_irq(cpuid);
@@ -390,7 +389,6 @@ smp_callin (void)
         set_cpu_online(cpuid, true);
         per_cpu(cpu_state, cpuid) = CPU_ONLINE;
         spin_unlock(&vector_lock);
-        ipi_call_unlock_irq();
 
         smp_setup_percpu_timer();
...
@@ -79,11 +79,6 @@ static __inline__ int cpu_number_map(int cpu)
         return cpu;
 }
 
-static __inline__ unsigned int num_booting_cpus(void)
-{
-        return cpumask_weight(&cpu_callout_map);
-}
-
 extern void smp_send_timer(void);
 extern unsigned long send_IPI_mask_phys(const cpumask_t*, int, int);
...
@@ -876,9 +876,7 @@ static void __init smp_online(void)
 
         notify_cpu_starting(cpu);
 
-        ipi_call_lock();
         set_cpu_online(cpu, true);
-        ipi_call_unlock();
 
         local_irq_enable();
 }
...
@@ -300,9 +300,7 @@ smp_cpu_init(int cpunum)
 
         notify_cpu_starting(cpunum);
 
-        ipi_call_lock();
         set_cpu_online(cpunum, true);
-        ipi_call_unlock();
 
         /* Initialise the idle task for this CPU */
         atomic_inc(&init_mm.mm_count);
...
@@ -571,7 +571,6 @@ void __devinit start_secondary(void *unused)
         if (system_state == SYSTEM_RUNNING)
                 vdso_data->processorCount++;
 #endif
-        ipi_call_lock();
         notify_cpu_starting(cpu);
         set_cpu_online(cpu, true);
         /* Update sibling maps */
@@ -601,7 +600,6 @@ void __devinit start_secondary(void *unused)
                         of_node_put(np);
         }
         of_node_put(l2_cache);
-        ipi_call_unlock();
 
         local_irq_enable();
...
@@ -717,9 +717,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
         init_cpu_vtimer();
         pfault_init();
         notify_cpu_starting(smp_processor_id());
-        ipi_call_lock();
         set_cpu_online(smp_processor_id(), true);
-        ipi_call_unlock();
         local_irq_enable();
         /* cpu_idle will call schedule for us */
         cpu_idle();
...
@@ -103,8 +103,6 @@ void __cpuinit smp_callin(void)
         if (cheetah_pcache_forced_on)
                 cheetah_enable_pcache();
 
-        local_irq_enable();
-
         callin_flag = 1;
         __asm__ __volatile__("membar #Sync\n\t"
                              "flush  %%g6" : : : "memory");
@@ -124,9 +122,8 @@ void __cpuinit smp_callin(void)
         while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
                 rmb();
 
-        ipi_call_lock_irq();
         set_cpu_online(cpuid, true);
-        ipi_call_unlock_irq();
+        local_irq_enable();
 
         /* idle thread is expected to have preempt disabled */
         preempt_disable();
@@ -1308,9 +1305,7 @@ int __cpu_disable(void)
         mdelay(1);
         local_irq_disable();
 
-        ipi_call_lock();
         set_cpu_online(cpu, false);
-        ipi_call_unlock();
 
         cpu_map_rebuild();
...
@@ -198,17 +198,7 @@ void __cpuinit online_secondary(void)
 
         notify_cpu_starting(smp_processor_id());
 
-        /*
-         * We need to hold call_lock, so there is no inconsistency
-         * between the time smp_call_function() determines number of
-         * IPI recipients, and the time when the determination is made
-         * for which cpus receive the IPI. Holding this
-         * lock helps us to not include this cpu in a currently in progress
-         * smp_call_function().
-         */
-        ipi_call_lock();
         set_cpu_online(smp_processor_id(), 1);
-        ipi_call_unlock();
         __get_cpu_var(cpu_state) = CPU_ONLINE;
 
         /* Set up tile-specific state for this cpu. */
...
@@ -169,11 +169,6 @@ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)    per_cpu(x86_cpu_to_apicid, cpu)
 
-/* We don't mark CPUs online until __cpu_up(), so we need another measure */
-static inline int num_booting_cpus(void)
-{
-        return cpumask_weight(cpu_callout_mask);
-}
 #else /* !CONFIG_SMP */
 #define wbinvd_on_cpu(cpu)     wbinvd()
 static inline int wbinvd_on_all_cpus(void)
...
@@ -255,22 +255,13 @@ notrace static void __cpuinit start_secondary(void *unused)
         check_tsc_sync_target();
 
         /*
-         * We need to hold call_lock, so there is no inconsistency
-         * between the time smp_call_function() determines number of
-         * IPI recipients, and the time when the determination is made
-         * for which cpus receive the IPI. Holding this
-         * lock helps us to not include this cpu in a currently in progress
-         * smp_call_function().
-         *
          * We need to hold vector_lock so there the set of online cpus
          * does not change while we are assigning vectors to cpus.  Holding
          * this lock ensures we don't half assign or remove an irq from a cpu.
          */
-        ipi_call_lock();
         lock_vector_lock();
         set_cpu_online(smp_processor_id(), true);
         unlock_vector_lock();
-        ipi_call_unlock();
         per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
         x86_platform.nmi_init();
...
@@ -80,9 +80,7 @@ static void __cpuinit cpu_bringup(void)
 
         notify_cpu_starting(cpu);
 
-        ipi_call_lock();
         set_cpu_online(cpu, true);
-        ipi_call_unlock();
 
         this_cpu_write(cpu_state, CPU_ONLINE);
...
@@ -90,10 +90,6 @@ void kick_all_cpus_sync(void);
 void __init call_function_init(void);
 void generic_smp_call_function_single_interrupt(void);
 void generic_smp_call_function_interrupt(void);
-void ipi_call_lock(void);
-void ipi_call_unlock(void);
-void ipi_call_lock_irq(void);
-void ipi_call_unlock_irq(void);
 #else
 static inline void call_function_init(void) { }
 #endif
@@ -181,7 +177,6 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
 } while (0)
 
 static inline void smp_send_reschedule(int cpu) { }
-#define num_booting_cpus()                      1
 #define smp_prepare_boot_cpu()                  do {} while (0)
 #define smp_call_function_many(mask, func, info, wait) \
         (up_smp_call_function(func, info))
...
@@ -581,26 +581,6 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
         return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
-
-void ipi_call_lock(void)
-{
-        raw_spin_lock(&call_function.lock);
-}
-
-void ipi_call_unlock(void)
-{
-        raw_spin_unlock(&call_function.lock);
-}
-
-void ipi_call_lock_irq(void)
-{
-        raw_spin_lock_irq(&call_function.lock);
-}
-
-void ipi_call_unlock_irq(void)
-{
-        raw_spin_unlock_irq(&call_function.lock);
-}
 #endif /* USE_GENERIC_SMP_HELPERS */
 
 /* Setup configured maximum number of CPUs to activate */
...
@@ -3,8 +3,6 @@
 
 struct task_struct;
 
-int smpboot_prepare(unsigned int cpu);
-
 #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
 struct task_struct *idle_thread_get(unsigned int cpu);
 void idle_thread_set_boot_cpu(void);
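
Why dropping the locks is safe: ipi_call_lock()/ipi_call_unlock() only took call_function.lock, which guards the queue of pending cross-CPU function calls, not cpu_online_mask. In smp_call_function_many() the set of IPI recipients is computed from cpu_online_mask before that lock is taken, so wrapping set_cpu_online() in it never actually serialized a booting CPU against recipient selection. A condensed sketch of that ordering (paraphrased from the 3.5-era kernel/smp.c, abbreviated rather than quoted verbatim):

        struct call_function_data *data = &__get_cpu_var(cfd_data);

        /* The recipient mask is snapshotted from cpu_online_mask here,
         * before call_function.lock is ever taken ... */
        cpumask_and(data->cpumask, mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), data->cpumask);

        /* ... and the lock only guards insertion into the call queue. */
        raw_spin_lock_irqsave(&call_function.lock, flags);
        list_add_rcu(&data->csd.list, &call_function.queue);
        raw_spin_unlock_irqrestore(&call_function.lock, flags);

        /* IPIs go out to the precomputed mask. */
        arch_send_call_function_ipi_mask(data->cpumask);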