Commit ec55bbd7 authored by John Hawkes, committed by James Bottomley

[PATCH] 2.5.70 remove smp_send_reschedule() cruft

smp_send_reschedule_all() is unused in 2.5 and can be eliminated.
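
For reference, any remaining caller that wants "kick every other CPU" semantics
can open-code it on top of smp_send_reschedule(), along the lines of the removed
ia64 variant below.  This is only a sketch; reschedule_all_other_cpus() is a
hypothetical helper name, not something added by this patch:

	static inline void reschedule_all_other_cpus(void)
	{
		int i;
		int cpu = get_cpu();	/* disable preemption while reading "self" */

		for (i = 0; i < NR_CPUS; i++)
			if (cpu_online(i) && i != cpu)
				smp_send_reschedule(i);
		put_cpu();
	}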
parent 05742d9d
@@ -460,17 +460,6 @@ void smp_send_reschedule(int cpu)
 	send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR);
 }
 
-/*
- * this function sends a reschedule IPI to all (other) CPUs.
- * This should only be used if some 'global' task became runnable,
- * such as a RT task, that must be handled now. The first CPU
- * that manages to grab the task will run it.
- */
-void smp_send_reschedule_all(void)
-{
-	send_IPI_allbutself(RESCHEDULE_VECTOR);
-}
-
 /*
  * Structure and data for smp_call_function(). This is designed to minimise
  * static memory requirements. It also looks cleaner.
...
@@ -205,24 +205,6 @@ smp_send_reschedule (int cpu)
 	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
 
-/*
- * This function sends a reschedule IPI to all (other) CPUs. This should only be used if
- * some 'global' task became runnable, such as a RT task, that must be handled now. The
- * first CPU that manages to grab the task will run it.
- */
-void
-smp_send_reschedule_all (void)
-{
-	int i;
-	int cpu = get_cpu(); /* disable preemption */
-
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i) && i != cpu)
-			smp_send_reschedule(i);
-	put_cpu();
-}
-
 void
 smp_flush_tlb_all (void)
 {
...
@@ -392,17 +392,6 @@ void smp_send_reschedule(int cpu)
 	smp_message_pass(cpu, PPC_MSG_RESCHEDULE, 0, 0);
 }
 
-/*
- * this function sends a reschedule IPI to all (other) CPUs.
- * This should only be used if some 'global' task became runnable,
- * such as a RT task, that must be handled now. The first CPU
- * that manages to grab the task will run it.
- */
-void smp_send_reschedule_all(void)
-{
-	smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_RESCHEDULE, 0, 0);
-}
-
 #ifdef CONFIG_XMON
 void smp_send_xmon_break(int cpu)
 {
...
@@ -41,7 +41,6 @@ extern int cpu_sibling_map[];
 extern void smp_flush_tlb(void);
 extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
 extern void smp_send_reschedule(int cpu);
-extern void smp_send_reschedule_all(void);
 extern void smp_invalidate_rcv(void);	/* Process an NMI */
 extern void (*mtrr_hook) (void);
 extern void zap_low_mappings (void);
...
@@ -136,8 +136,6 @@ extern void smp_do_timer (struct pt_regs *regs);
 extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
				     int retry, int wait);
 extern void smp_send_reschedule (int cpu);
-extern void smp_send_reschedule_all (void);
 
 #endif /* CONFIG_SMP */
 #endif /* _ASM_IA64_SMP_H */
@@ -34,7 +34,6 @@ extern void smp_send_tlb_invalidate(int);
 extern void smp_send_xmon_break(int cpu);
 struct pt_regs;
 extern void smp_message_recv(int, struct pt_regs *);
-extern void smp_send_reschedule_all(void);
 
 #define NO_PROC_ID 0xFF	/* No processor magic marker */
...
@@ -42,7 +42,6 @@ extern int pic_mode;
 extern void smp_flush_tlb(void);
 extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
 extern void smp_send_reschedule(int cpu);
-extern void smp_send_reschedule_all(void);
 extern void smp_invalidate_rcv(void);	/* Process an NMI */
 extern void (*mtrr_hook) (void);
 extern void zap_low_mappings(void);
...
@@ -110,7 +110,6 @@ void smp_prepare_boot_cpu(void);
 #define smp_call_function(func,info,retry,wait) ({ 0; })
 #define on_each_cpu(func,info,retry,wait) ({ func(info); 0; })
 static inline void smp_send_reschedule(int cpu) { }
-static inline void smp_send_reschedule_all(void) { }
 #define cpu_online_map 1
 #define cpu_online(cpu) ({ BUG_ON((cpu) != 0); 1; })
 #define num_online_cpus() 1
...