Commit b23ee846 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] use per-cpu data for ia32 profiler

Patch from Ravikiran G Thirumalai <kiran@in.ibm.com>

Converts the ia32 kernel profiler to use the percpu area infrastructure.
parent 9bba8dd6
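
For context, the conversion below follows the generic per-CPU variable pattern from <linux/percpu.h>: the NR_CPUS-sized array is replaced by a DEFINE_PER_CPU() definition, and each access goes through per_cpu(var, cpu). A minimal sketch of that pattern, using hypothetical names rather than the patch's own code:

    /*
     * Illustrative sketch only -- not part of the patch.  my_counter and
     * my_timer_tick() are made-up names standing in for prof_counter and
     * the timer-interrupt code changed below.
     */
    #include <linux/percpu.h>
    #include <linux/smp.h>

    /* Old style: one slot per CPU in a global, NR_CPUS-sized array:
     *	int my_counter[NR_CPUS] = { 1, };
     * New style: a per-CPU variable in the per-CPU data area, with every
     * CPU's copy statically initialised to 1.
     */
    static DEFINE_PER_CPU(int, my_counter) = 1;

    /* Runs on the local CPU with interrupts off (e.g. from the local
     * timer interrupt), as in the code this patch touches.
     */
    static void my_timer_tick(void)
    {
    	int cpu = smp_processor_id();

    	/* per_cpu(var, cpu) names the given CPU's copy of the variable. */
    	if (--per_cpu(my_counter, cpu) <= 0)
    		per_cpu(my_counter, cpu) = 1;
    }

One consequence visible in the diff: because the DEFINE_PER_CPU() definition carries the "= 1" initialiser, the explicit boot-time loops that set prof_counter to 1 in smp_boot_cpus() and find_smp_config() become redundant and are simply deleted.
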
@@ -52,7 +52,7 @@ int using_apic_timer = 0;

 int prof_multiplier[NR_CPUS] = { 1, };
 int prof_old_multiplier[NR_CPUS] = { 1, };
-int prof_counter[NR_CPUS] = { 1, };
+DEFINE_PER_CPU(int, prof_counter) = 1;

 int get_maxlvt(void)
 {
@@ -997,7 +997,7 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
 	x86_do_profile(regs);

-	if (--prof_counter[cpu] <= 0) {
+	if (--per_cpu(prof_counter, cpu) <= 0) {
 		/*
 		 * The multiplier may have changed since the last time we got
 		 * to this point as a result of the user writing to
@@ -1006,10 +1006,12 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
 		 *
 		 * Interrupts are already masked off at this point.
 		 */
-		prof_counter[cpu] = prof_multiplier[cpu];
-		if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
-			__setup_APIC_LVTT(calibration_result/prof_counter[cpu]);
-			prof_old_multiplier[cpu] = prof_counter[cpu];
+		per_cpu(prof_counter, cpu) = prof_multiplier[cpu];
+		if (per_cpu(prof_counter, cpu) != prof_old_multiplier[cpu]) {
+			__setup_APIC_LVTT(
+				calibration_result/
+				per_cpu(prof_counter, cpu));
+			prof_old_multiplier[cpu] = per_cpu(prof_counter, cpu);
 		}

 #ifdef CONFIG_SMP
......
@@ -935,10 +935,6 @@ static void smp_tune_scheduling (void)
  * Cycle through the processors sending APIC IPIs to boot each.
  */

-extern int prof_multiplier[NR_CPUS];
-extern int prof_old_multiplier[NR_CPUS];
-extern int prof_counter[NR_CPUS];
-
 static int boot_cpu_logical_apicid;
 /* Where the IO area was mapped on multiquad, always 0 otherwise */
 void *xquad_portio;
@@ -949,17 +945,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 {
 	int apicid, cpu, bit;

-	/*
-	 * Initialize the logical to physical CPU number mapping
-	 * and the per-CPU profiling counter/multiplier
-	 */
-
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		prof_counter[cpu] = 1;
-		prof_old_multiplier[cpu] = 1;
-		prof_multiplier[cpu] = 1;
-	}
-
 	/*
 	 * Setup boot CPU information
 	 */
......
@@ -236,7 +236,7 @@ static __u32 trampoline_base;
 /* The per cpu profile stuff - used in smp_local_timer_interrupt */
 static unsigned int prof_multiplier[NR_CPUS] __cacheline_aligned = { 1, };
 static unsigned int prof_old_multiplier[NR_CPUS] __cacheline_aligned = { 1, };
-static unsigned int prof_counter[NR_CPUS] __cacheline_aligned = { 1, };
+static DEFINE_PER_CPU(unsigned int, prof_counter) = 1;

 /* the map used to check if a CPU has booted */
 static __u32 cpu_booted_map;
@@ -393,9 +393,6 @@ find_smp_config(void)

 	/* initialize the CPU structures (moved from smp_boot_cpus) */
 	for(i=0; i<NR_CPUS; i++) {
-		prof_counter[i] = 1;
-		prof_old_multiplier[i] = 1;
-		prof_multiplier[i] = 1;
 		cpu_irq_affinity[i] = ~0;
 	}
 	cpu_online_map = (1<<boot_cpu_id);
@@ -1312,7 +1309,7 @@ smp_local_timer_interrupt(struct pt_regs * regs)
 	x86_do_profile(regs);

-	if (--prof_counter[cpu] <= 0) {
+	if (--per_cpu(prof_counter, cpu) <= 0) {
 		/*
 		 * The multiplier may have changed since the last time we got
 		 * to this point as a result of the user writing to
@@ -1321,10 +1318,10 @@ smp_local_timer_interrupt(struct pt_regs * regs)
 		 *
 		 * Interrupts are already masked off at this point.
 		 */
-		prof_counter[cpu] = prof_multiplier[cpu];
-		if (prof_counter[cpu] != prof_old_multiplier[cpu]) {
+		per_cpu(prof_counter,cpu) = prof_multiplier[cpu];
+		if (per_cpu(prof_counter, cpu) != prof_old_multiplier[cpu]) {
 			/* FIXME: need to update the vic timer tick here */
-			prof_old_multiplier[cpu] = prof_counter[cpu];
+			prof_old_multiplier[cpu] = per_cpu(prof_counter, cpu);
 		}

 	update_process_times(user_mode(regs));
......