Commit 01599fca authored by Andrew Morton, committed by Linus Torvalds

cpufreq: use smp_call_function_[single|many]() in acpi-cpufreq.c

Attempting to rid us of the problematic work_on_cpu().  Just use
smp_call_function_single() here.

This repairs a 10% sysbench(oltp)+mysql regression which Mike reported,
due to

  commit 6b44003e
  Author: Andrew Morton <akpm@linux-foundation.org>
  Date:   Thu Apr 9 09:50:37 2009 -0600

      work_on_cpu(): rewrite it to create a kernel thread on demand

It seems that the kernel calls these acpi-cpufreq functions quite
frequently.

Valdis Kletnieks also reports that this causes 70-90 forks per second on
his hardware.
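
For illustration only (not part of this patch; the helper names below are
invented), a minimal sketch of the smp_call_function_single() pattern being
switched to.  Unlike work_on_cpu(), which now forks a kernel thread on demand
and may sleep, the callback runs in IPI context on the target CPU and must
not sleep:

  /* Hypothetical example; not from acpi-cpufreq.c */
  static void read_aperf_on_cpu(void *info)
  {
          u32 *lo_hi = info;

          /* Runs on the target CPU, in IPI context: no sleeping here */
          rdmsr(MSR_IA32_APERF, lo_hi[0], lo_hi[1]);
  }

  static u64 example_read_aperf(unsigned int cpu)
  {
          u32 lo_hi[2] = { 0, 0 };

          /* wait == 1: block until the target CPU has run the callback */
          smp_call_function_single(cpu, read_aperf_on_cpu, lo_hi, 1);

          return ((u64)lo_hi[1] << 32) | lo_hi[0];
  }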

Cc: Valdis.Kletnieks@vt.edu
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Zhao Yakui <yakui.zhao@intel.com>
Acked-by: Dave Jones <davej@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Mike Galbraith <efault@gmx.de>
Cc: "Zhang, Yanmin" <yanmin_zhang@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
[ Made it use smp_call_function_many() instead of looping over cpu's
  with smp_call_function_single()    - Linus ]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8371f87c
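
As a companion sketch (again illustrative only, with invented names), the
smp_call_function_many() pattern from Linus' bracketed note replaces a
per-CPU loop with a single call over a cpumask.  Note that
smp_call_function_many() only IPIs the other online CPUs in the mask, so a
caller that may itself be in the mask has to invoke the callback locally:

  /* Hypothetical example; not from acpi-cpufreq.c */
  struct msr_write {
          u32 msr, lo, hi;
  };

  static void do_msr_write(void *info)
  {
          struct msr_write *w = info;

          wrmsr(w->msr, w->lo, w->hi);    /* runs in IPI context */
  }

  static void msr_write_on_mask(const struct cpumask *mask, struct msr_write *w)
  {
          int this_cpu = get_cpu();       /* preemption off around the call */

          /* IPIs every other online CPU in the mask and waits for them */
          smp_call_function_many(mask, do_msr_write, w, 1);

          /* the calling CPU is never IPI'd; handle it here if needed */
          if (cpumask_test_cpu(this_cpu, mask))
                  do_msr_write(w);

          put_cpu();
  }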
@@ -153,7 +153,8 @@ struct drv_cmd {
         u32 val;
 };
 
-static long do_drv_read(void *_cmd)
+/* Called via smp_call_function_single(), on the target CPU */
+static void do_drv_read(void *_cmd)
 {
         struct drv_cmd *cmd = _cmd;
         u32 h;
@@ -170,10 +171,10 @@ static long do_drv_read(void *_cmd)
         default:
                 break;
         }
-        return 0;
 }
 
-static long do_drv_write(void *_cmd)
+/* Called via smp_call_function_many(), on the target CPUs */
+static void do_drv_write(void *_cmd)
 {
         struct drv_cmd *cmd = _cmd;
         u32 lo, hi;
@@ -192,23 +193,18 @@ static long do_drv_write(void *_cmd)
         default:
                 break;
         }
-        return 0;
 }
 
 static void drv_read(struct drv_cmd *cmd)
 {
         cmd->val = 0;
-        work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
+        smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
 }
 
 static void drv_write(struct drv_cmd *cmd)
 {
-        unsigned int i;
-
-        for_each_cpu(i, cmd->mask) {
-                work_on_cpu(i, do_drv_write, cmd);
-        }
+        smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
 }
 
 static u32 get_cur_val(const struct cpumask *mask)
@@ -252,15 +248,13 @@ struct perf_pair {
         } aperf, mperf;
 };
 
-static long read_measured_perf_ctrs(void *_cur)
+/* Called via smp_call_function_single(), on the target CPU */
+static void read_measured_perf_ctrs(void *_cur)
 {
         struct perf_pair *cur = _cur;
 
         rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi);
         rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
-
-        return 0;
 }
 
 /*
@@ -283,7 +277,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
         unsigned int perf_percent;
         unsigned int retval;
 
-        if (!work_on_cpu(cpu, read_measured_perf_ctrs, &readin))
+        if (smp_call_function_single(cpu, read_measured_perf_ctrs, &cur, 1))
                 return 0;
 
         cur.aperf.whole = readin.aperf.whole -