Commit e1c56594 authored by Adrian Bunk, committed by Linus Torvalds

[PATCH] smp{,boot}.c cleanups

This patch contains the following cleanups on several architectures:
- make some needlessly global code static
- remove the following write-only (except for printk's) variables:
  - cache_decay_ticks
  - smp_threads_ready
  - cacheflush_time

I've only compile-tested this on i386, but I hope any mistakes I made
are on unimportant architectures.  ;-)
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent a1a57d66
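
To make the two main cleanup patterns concrete before the diff, here is a minimal standalone C sketch (hypothetical names; this is not code from the patch). A variable whose only reads are diagnostic printouts is effectively write-only, so the variable and the code maintaining it can be deleted together, which is exactly what happens to cacheflush_time and cache_decay_ticks below. Likewise, a symbol referenced from only one file should be static, as the patch does for smp_callin(), cpucount, and map_cpu_to_logical_apicid().

    #include <stdio.h>

    /* Write-only: assigned in tune_scheduling() and printed, but never
     * read by any other code.  Deleting it (and its printout) changes
     * nothing except the lost diagnostic line. */
    static unsigned long flush_cost;

    /* Used only in this file, so it is declared static rather than
     * left in the global namespace. */
    static void tune_scheduling(unsigned long cpu_khz)
    {
            flush_cost = cpu_khz / 100;                /* the only write */
            printf("cutoff: %lu usecs\n", flush_cost); /* the only read */
    }

    int main(void)
    {
            tune_scheduling(500000UL);
            return 0;
    }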
@@ -78,8 +78,6 @@ static unsigned long hwrpb_cpu_present_mask __initdata = 0;
 int smp_num_probed;		/* Internal processor count */
 int smp_num_cpus = 1;		/* Number that came online.  */
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
 
 extern void calibrate_delay(void);
@@ -217,15 +215,6 @@ smp_tune_scheduling (int cpuid)
 	}
 
 	freq = hwrpb->cycle_freq ? : est_cycle_freq;
-	cacheflush_time = (freq / 1000000) * (on_chip_cache << 10) / bandwidth;
-	cache_decay_ticks = cacheflush_time / (freq / 1000) * HZ / 1000;
-
-	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
-	       cacheflush_time/(freq/1000000),
-	       (cacheflush_time*100/(freq/1000000)) % 100);
-	printk("task migration cache decay timeout: %ld msecs.\n",
-	       (cache_decay_ticks + 1) * 1000 / HZ);
 }
 
 /* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
......
@@ -77,9 +77,6 @@ u8 x86_cpu_to_apicid[NR_CPUS] =
 			{ [0 ... NR_CPUS-1] = 0xff };
 EXPORT_SYMBOL(x86_cpu_to_apicid);
 
-/* Set when the idlers are all forked */
-int smp_threads_ready;
-
 /*
  * Trampoline 80x86 program as an array.
  */
@@ -89,6 +86,8 @@ extern unsigned char trampoline_end [];
 static unsigned char *trampoline_base;
 static int trampoline_exec;
 
+static void map_cpu_to_logical_apicid(void);
+
 /*
  * Currently trivial. Write the real->protected mode
  * bootstrap into the page concerned. The caller
@@ -319,7 +318,7 @@ extern void calibrate_delay(void);
 static atomic_t init_deasserted;
 
-void __init smp_callin(void)
+static void __init smp_callin(void)
 {
 	int cpuid, phys_id;
 	unsigned long timeout;
@@ -408,7 +407,7 @@ void __init smp_callin(void)
 	synchronize_tsc_ap();
 }
 
-int cpucount;
+static int cpucount;
 
 /*
  * Activate a secondary processor.
@@ -506,7 +505,7 @@ static inline void unmap_cpu_to_node(int cpu)
 u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
 
-void map_cpu_to_logical_apicid(void)
+static void map_cpu_to_logical_apicid(void)
 {
 	int cpu = smp_processor_id();
 	int apicid = logical_smp_processor_id();
@@ -515,7 +514,7 @@ void map_cpu_to_logical_apicid(void)
 	map_cpu_to_node(cpu, apicid_to_node(apicid));
 }
 
-void unmap_cpu_to_logical_apicid(int cpu)
+static void unmap_cpu_to_logical_apicid(int cpu)
 {
 	cpu_2_logical_apicid[cpu] = BAD_APICID;
 	unmap_cpu_to_node(cpu);
@@ -847,9 +846,6 @@ static int __init do_boot_cpu(int apicid)
 	return boot_error;
 }
 
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
-
 static void smp_tune_scheduling (void)
 {
 	unsigned long cachesize;       /* kB */
@@ -870,7 +866,6 @@ static void smp_tune_scheduling (void)
 		 * this basically disables processor-affinity
 		 * scheduling on SMP without a TSC.
 		 */
-		cacheflush_time = 0;
 		return;
 	} else {
 		cachesize = boot_cpu_data.x86_cache_size;
@@ -878,17 +873,7 @@ static void smp_tune_scheduling (void)
 			cachesize = 16;	/* Pentiums, 2x8kB cache */
 			bandwidth = 100;
 		}
-
-		cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
 	}
-
-	cache_decay_ticks = (long)cacheflush_time/cpu_khz + 1;
-
-	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
-		(long)cacheflush_time/(cpu_khz/1000),
-		((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
-	printk("task migration cache decay timeout: %ld msecs.\n",
-		cache_decay_ticks);
 }
 
 /*
......
@@ -37,10 +37,6 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_m
 /* CPU IRQ affinity -- set to all ones initially */
 static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = ~0UL };
 
-/* Set when the idlers are all forked - Set in main.c but not actually
- * used by any other parts of the kernel */
-int smp_threads_ready = 0;
-
 /* per CPU data structure (for /proc/cpuinfo et al), visible externally
  * indexed physically */
 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
@@ -81,14 +77,6 @@ cpumask_t cpu_online_map = CPU_MASK_NONE;
  * by scheduler but indexed physically */
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
 
-/* estimate of time used to flush the SMP-local cache - used in
- * processor affinity calculations */
-cycles_t cacheflush_time = 0;
-
-/* cache decay ticks for scheduler---a fairly useless quantity for the
-   voyager system with its odd affinity and huge L3 cache */
-unsigned long cache_decay_ticks = 20;
-
 /* The internal functions */
 static void send_CPI(__u32 cpuset, __u8 cpi);
......
@@ -427,25 +427,11 @@ decay (char *str)
 {
 	int ticks;
 	get_option (&str, &ticks);
-	cache_decay_ticks = ticks;
 	return 1;
 }
 
 __setup("decay=", decay);
 
-/*
- * # of ticks an idle task is considered cache-hot.  Highly application-dependent.  There
- * are apps out there which are known to suffer significantly with values >= 4.
- */
-unsigned long cache_decay_ticks = 10;	/* equal to MIN_TIMESLICE */
-
-static void
-smp_tune_scheduling (void)
-{
-	printk(KERN_INFO "task migration cache decay timeout: %ld msecs.\n",
-	       (cache_decay_ticks + 1) * 1000 / HZ);
-}
-
 /*
  * Initialize the logical CPU number to SAPICID mapping
  */
@@ -544,7 +530,6 @@ smp_prepare_cpus (unsigned int max_cpus)
 	printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);
 
 	current_thread_info()->cpu = 0;
-	smp_tune_scheduling();
 
 	/*
 	 * If SMP should be disabled, then really disable it!
......
@@ -81,9 +81,6 @@ static cpumask_t cpu_callin_map;
 /* Per CPU bogomips and other parameters */
 struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned;
 
-/* Set when the idlers are all forked */
-int smp_threads_ready;
-
 static int cpucount;
 static cpumask_t smp_commenced_mask;
@@ -106,8 +103,6 @@ spinlock_t ipi_lock[NR_IPIS];
 static unsigned int calibration_result;
 
-unsigned long cache_decay_ticks = HZ / 100;
-
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
 /* Function Prototypes */
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
......
@@ -46,9 +46,6 @@ int __cpu_logical_map[NR_CPUS];	/* Map logical to physical */
 EXPORT_SYMBOL(phys_cpu_present_map);
 EXPORT_SYMBOL(cpu_online_map);
 
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
-
 static void smp_tune_scheduling (void)
 {
 	struct cache_desc *cd = &current_cpu_data.scache;
@@ -71,25 +68,10 @@ static void smp_tune_scheduling (void)
 	 * L1 cache), on PIIs it's around 50-100 usecs, depending on
 	 * the cache size)
 	 */
-	if (!cpu_khz) {
-		/*
-		 * This basically disables processor-affinity scheduling on SMP
-		 * without a cycle counter.  Currently all SMP capable MIPS
-		 * processors have a cycle counter.
-		 */
-		cacheflush_time = 0;
+	if (!cpu_khz)
 		return;
-	}
 
 	cachesize = cd->linesz * cd->sets * cd->ways;
-	cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
-	cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000;
-
-	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
-		(long)cacheflush_time/(cpu_khz/1000),
-		((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
-	printk("task migration cache decay timeout: %ld msecs.\n",
-		(cache_decay_ticks + 1) * 1000 / HZ);
 }
 
 extern void __init calibrate_delay(void);
......
@@ -60,8 +60,6 @@ volatile struct task_struct *smp_init_current_idle_task;
 static volatile int cpu_now_booting = 0;	/* track which CPU is booting */
 
-unsigned long cache_decay_ticks;	/* declared by include/linux/sched.h */
-
 static int parisc_max_cpus = 1;
 
 /* online cpus are ones that we've managed to bring up completely
@@ -583,8 +581,6 @@ void __devinit smp_prepare_boot_cpu(void)
 	cpu_set(bootstrap_processor, cpu_online_map);
 	cpu_set(bootstrap_processor, cpu_present_map);
-
-	cache_decay_ticks = HZ/100;	/* FIXME very rough.  */
 }
......
@@ -35,14 +35,12 @@
 #include <asm/tlbflush.h>
 #include <asm/xmon.h>
 
-int smp_threads_ready;
 volatile int smp_commenced;
 int smp_tb_synchronized;
 struct cpuinfo_PPC cpu_data[NR_CPUS];
 struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 };
 atomic_t ipi_recv;
 atomic_t ipi_sent;
-unsigned long cache_decay_ticks = HZ/100;
 cpumask_t cpu_online_map;
 cpumask_t cpu_possible_map;
 int smp_hw_index[NR_CPUS];
......
@@ -54,8 +54,6 @@
 #define DBG(fmt...)
 #endif
 
-int smp_threads_ready;
-
 cpumask_t cpu_possible_map = CPU_MASK_NONE;
 cpumask_t cpu_online_map = CPU_MASK_NONE;
 cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
......
@@ -50,12 +50,9 @@ extern volatile int __cpu_logical_map[];
  */
 struct _lowcore *lowcore_ptr[NR_CPUS];
 
-cycles_t cacheflush_time=0;
-int smp_threads_ready=0;	/* Set when the idlers are all forked. */
 cpumask_t cpu_online_map;
 cpumask_t cpu_possible_map;
-unsigned long cache_decay_ticks = 0;
 
 static struct task_struct *current_set[NR_CPUS];
......
@@ -34,14 +34,12 @@
  * but is designed to be usable regardless if there's an MMU
  * present or not.
  */
-int smp_threads_ready = 0;
 struct sh_cpuinfo cpu_data[NR_CPUS];
 
 extern void per_cpu_trap_init(void);
 
 cpumask_t cpu_possible_map;
 cpumask_t cpu_online_map;
-unsigned long cache_decay_ticks = HZ / 100;
 static atomic_t cpus_booted = ATOMIC_INIT(0);
 
 /* These are defined by the board-specific code. */
@@ -129,7 +127,6 @@ int start_secondary(void *unused)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-	smp_threads_ready = 1;
 	smp_mb();
 }
......
@@ -36,15 +36,12 @@
 volatile int smp_processors_ready = 0;
 int smp_num_cpus = 1;
-int smp_threads_ready=0;
 volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
 unsigned char boot_cpu_id = 0;
 unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
 int smp_activated = 0;
 volatile int __cpu_number_map[NR_CPUS];
 volatile int __cpu_logical_map[NR_CPUS];
-cycles_t cacheflush_time = 0; /* XXX */
-unsigned long cache_decay_ticks = 100;
 
 cpumask_t cpu_online_map = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
......
@@ -45,7 +45,6 @@ extern void calibrate_delay(void);
 extern volatile int smp_processors_ready;
 extern int smp_num_cpus;
 static int smp_highest_cpu;
-extern int smp_threads_ready;
 extern volatile unsigned long cpu_callin_map[NR_CPUS];
 extern struct cpuinfo_sparc cpu_data[NR_CPUS];
 extern unsigned char boot_cpu_id;
......
@@ -41,7 +41,6 @@ extern void calibrate_delay(void);
 extern volatile int smp_processors_ready;
 extern int smp_num_cpus;
-extern int smp_threads_ready;
 extern volatile unsigned long cpu_callin_map[NR_CPUS];
 extern unsigned char boot_cpu_id;
 extern int smp_activated;
......
@@ -1055,9 +1055,6 @@ void __init smp_tick_init(void)
 	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
 }
 
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
-
 extern unsigned long cheetah_tune_scheduling(void);
 
 static void __init smp_tune_scheduling(void)
@@ -1078,10 +1075,8 @@ static void __init smp_tune_scheduling(void)
 	 * of moving a process from one cpu to another).
 	 */
 	printk("SMP: Calibrating ecache flush... ");
-	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		cacheflush_time = cheetah_tune_scheduling();
-		goto report;
-	}
+	if (tlb_type == cheetah || tlb_type == cheetah_plus)
+		return;
 
 	cpu_find_by_instance(0, &cpu_node, NULL);
 	ecache_size = prom_getintdefault(cpu_node,
@@ -1124,24 +1119,8 @@ static void __init smp_tune_scheduling(void)
 		raw = (tick2 - tick1);
 
-		/* Dampen it a little, considering two processes
-		 * sharing the cache and fitting.
-		 */
-		cacheflush_time = (raw - (raw >> 2));
-
 		free_pages(orig_flush_base, order);
-	} else {
-		cacheflush_time = ((ecache_size << 2) +
-				   (ecache_size << 1));
 	}
 
-report:
-	/* Convert ticks/sticks to jiffies. */
-	cache_decay_ticks = cacheflush_time / timer_tick_offset;
-	if (cache_decay_ticks < 1)
-		cache_decay_ticks = 1;
-
-	printk("Using heuristic of %ld cycles, %ld ticks.\n",
-	       cacheflush_time, cache_decay_ticks);
 }
 
 /* /proc/profile writes can call this, don't __init it please. */
......
@@ -41,15 +41,9 @@ EXPORT_SYMBOL(cpu_possible_map);
  */
 struct cpuinfo_um cpu_data[NR_CPUS];
 
-/* Set when the idlers are all forked */
-int smp_threads_ready = 0;
-
 /* A statistic, can be a little off */
 int num_reschedules_sent = 0;
 
-/* Small, random number, never changed */
-unsigned long cache_decay_ticks = 5;
-
 /* Not changed after boot */
 struct task_struct *idle_threads[NR_CPUS];
......
@@ -70,9 +70,6 @@ static cpumask_t smp_commenced_mask;
 /* Per CPU bogomips and other parameters */
 struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
 
-/* Set when the idlers are all forked */
-int smp_threads_ready;
-
 cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
 
 /*
@@ -249,7 +246,7 @@ static void __init synchronize_tsc_ap (void)
 static atomic_t init_deasserted;
 
-void __init smp_callin(void)
+static void __init smp_callin(void)
 {
 	int cpuid, phys_id;
 	unsigned long timeout;
@@ -338,7 +335,7 @@ void __init smp_callin(void)
 	synchronize_tsc_ap();
 }
 
-int cpucount;
+static int cpucount;
 
 /*
  * Activate a secondary processor.
@@ -661,9 +658,6 @@ static void __init do_boot_cpu (int apicid)
 	}
 }
 
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
-
 static void smp_tune_scheduling (void)
 {
 	int cachesize;       /* kB */
@@ -680,11 +674,6 @@ static void smp_tune_scheduling (void)
 	 */
 
 	if (!cpu_khz) {
-		/*
-		 * this basically disables processor-affinity
-		 * scheduling on SMP without a TSC.
-		 */
-		cacheflush_time = 0;
 		return;
 	} else {
 		cachesize = boot_cpu_data.x86_cache_size;
@@ -692,17 +681,7 @@ static void smp_tune_scheduling (void)
 			cachesize = 16;	/* Pentiums, 2x8kB cache */
 			bandwidth = 100;
 		}
-
-		cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
 	}
-
-	cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000;
-
-	printk(KERN_INFO "per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
-		(long)cacheflush_time/(cpu_khz/1000),
-		((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
-	printk(KERN_INFO "task migration cache decay timeout: %ld msecs.\n",
-		(cache_decay_ticks + 1) * 1000 / HZ);
 }
 
 /*
......
@@ -20,7 +20,6 @@
  */
 typedef unsigned int cycles_t;
 
-extern cycles_t cacheflush_time;
 
 static inline cycles_t get_cycles (void)
 {
......
@@ -16,8 +16,6 @@
 typedef unsigned long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static inline cycles_t get_cycles (void)
 {
 	return 0;
......
@@ -21,8 +21,6 @@
 typedef unsigned long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static inline cycles_t get_cycles (void)
 {
 	return 0;
......
@@ -62,9 +62,6 @@ static inline int num_booting_cpus(void)
 	return cpus_weight(cpu_callout_map);
 }
 
-extern void map_cpu_to_logical_apicid(void);
-extern void unmap_cpu_to_logical_apicid(int cpu);
-
 #ifdef CONFIG_X86_LOCAL_APIC
 
 #ifdef APIC_DEFINITION
......
@@ -32,8 +32,6 @@
  */
 typedef unsigned long long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static inline cycles_t get_cycles (void)
 {
 	unsigned long long ret=0;
......
@@ -25,8 +25,6 @@
 typedef unsigned long long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static __inline__ cycles_t get_cycles (void)
 {
 	return 0;
......
@@ -45,7 +45,6 @@
  */
 typedef unsigned int cycles_t;
 
-extern cycles_t cacheflush_time;
 
 static inline cycles_t get_cycles (void)
 {
......
@@ -12,8 +12,6 @@
 typedef unsigned long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static inline cycles_t get_cycles (void)
 {
 	return mfctl(16);
......
@@ -19,8 +19,6 @@ typedef unsigned long cycles_t;
  * Currently only used on SMP.
  */
 
-extern cycles_t cacheflush_time;
-
 static inline cycles_t get_cycles(void)
 {
 	cycles_t ret = 0;
......
@@ -15,8 +15,6 @@
 typedef unsigned long long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static inline cycles_t get_cycles(void)
 {
 	cycles_t cycles;
......
@@ -10,8 +10,6 @@
 typedef unsigned long long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static __inline__ cycles_t get_cycles (void)
 {
 	return 0;
......
@@ -23,8 +23,6 @@
 typedef unsigned long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static __inline__ cycles_t get_cycles (void)
 {
 	return 0;
......
@@ -10,7 +10,6 @@
 /* XXX Maybe do something better at some point... -DaveM */
 typedef unsigned long cycles_t;
 
-extern cycles_t cacheflush_time;
 #define get_cycles() (0)
 
 #endif
@@ -3,8 +3,6 @@
 typedef unsigned long cycles_t;
 
-#define cacheflush_time (0)
-
 static inline cycles_t get_cycles (void)
 {
 	return 0;
......
@@ -16,8 +16,6 @@
 typedef unsigned long long cycles_t;
 
-extern cycles_t cacheflush_time;
-
 static inline cycles_t get_cycles (void)
 {
 	unsigned long long ret;
......
@@ -175,7 +175,6 @@ extern void cpu_init (void);
 extern void trap_init(void);
 extern void update_process_times(int user);
 extern void scheduler_tick(void);
-extern unsigned long cache_decay_ticks;
 
 /* Attach to any functions which should be ignored in wchan output. */
 #define __sched		__attribute__((__section__(".sched.text")))
......
@@ -71,11 +71,6 @@ static inline int on_each_cpu(void (*func) (void *info), void *info,
 	return ret;
 }
 
-/*
- * True once the per process idle is forked
- */
-extern int smp_threads_ready;
-
 #define MSG_ALL_BUT_SELF	0x8000	/* Assume <32768 CPU's */
 #define MSG_ALL			0x8001
@@ -102,7 +97,6 @@ void smp_prepare_boot_cpu(void);
 # define smp_processor_id()			0
 #endif
 #define hard_smp_processor_id()			0
-#define smp_threads_ready			1
 #define smp_call_function(func,info,retry,wait)	({ 0; })
 #define on_each_cpu(func,info,retry,wait)	({ func(info); 0; })
 static inline void smp_send_reschedule(int cpu) { }
......
@@ -361,7 +361,6 @@ static void __init smp_init(void)
 #if 0
 	/* Get other processors into their bootup holding patterns. */
-	smp_threads_ready=1;
 	smp_commence();
 #endif
 }
......