Commit 6c88159e authored by Dave Jones

[CPUFREQ] make ondemand governor aware of CPU domains

The following patch makes the ondemand governor aware of policy->cpus.

The policy->cpus mask, a recent addition to the cpufreq core, lets multiple CPUs share the same policy (useful where CPUs share frequency state).

The ondemand governor now looks at all CPUs in policy->cpus and bases its frequency increase/decrease decisions on the busiest, i.e. least idle, CPU in the group. Systems where policy->cpus contains only a single CPU are unaffected by this patch.
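
To make the decision rule concrete, here is a minimal user-space sketch (an illustration, not kernel code): each sampling period, the governor takes the idle+iowait tick delta for every CPU in policy->cpus and keys the group decision to the smallest delta. The tick values below are hypothetical.

#include <stdio.h>

#define NR_GROUP_CPUS 4

int main(void)
{
	/* Hypothetical idle+iowait tick deltas since the last sample,
	 * one per CPU sharing the policy. */
	unsigned int idle_delta[NR_GROUP_CPUS] = { 480, 120, 350, 90 };
	unsigned int idle_ticks = idle_delta[0];
	int j;

	/* Keep the minimum: the least idle (busiest) CPU decides for
	 * the whole group, mirroring the loops added by this patch. */
	for (j = 1; j < NR_GROUP_CPUS; j++)
		if (idle_delta[j] < idle_ticks)
			idle_ticks = idle_delta[j];

	/* idle_ticks is now 90 (CPU 3), the value that gets scaled by
	 * 100 and compared against the up/down thresholds. */
	printf("group idle_ticks = %u\n", idle_ticks);
	return 0;
}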
Signed-off-by: "Venkatesh Pallipadi" <venkatesh.pallipadi@intel.com>
Signed-off-by: Dominik Brodowski <linux@brodo.de>
Signed-off-by: Dave Jones <davej@redhat.com>
parent 463f7d6a
drivers/cpufreq/cpufreq_ondemand.c

@@ -229,10 +229,14 @@ static void dbs_check_cpu(int cpu)
 	static int down_skip[NR_CPUS];
 	struct cpu_dbs_info_s *this_dbs_info;
+	struct cpufreq_policy *policy;
+	unsigned int j;
 
 	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	if (!this_dbs_info->enable)
 		return;
 
+	policy = this_dbs_info->cur_policy;
 	/*
 	 * The default safe range is 20% to 80%
 	 * Every sampling_rate, we check
@@ -246,12 +250,33 @@ static void dbs_check_cpu(int cpu)
 	 * Frequency reduction happens at minimum steps of
 	 * 5% of max_frequency
 	 */
 	/* Check for frequency increase */
 	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
 		kstat_cpu(cpu).cpustat.iowait;
 	idle_ticks = total_idle_ticks -
 		this_dbs_info->prev_cpu_idle_up;
 	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+
+	for_each_cpu_mask(j, policy->cpus) {
+		unsigned int tmp_idle_ticks;
+		struct cpu_dbs_info_s *j_dbs_info;
+
+		if (j == cpu)
+			continue;
+
+		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+
+		/* Check for frequency increase */
+		total_idle_ticks = kstat_cpu(j).cpustat.idle +
+			kstat_cpu(j).cpustat.iowait;
+		tmp_idle_ticks = total_idle_ticks -
+			j_dbs_info->prev_cpu_idle_up;
+		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+
+		if (tmp_idle_ticks < idle_ticks)
+			idle_ticks = tmp_idle_ticks;
+	}
 
 	/* Scale idle ticks by 100 and compare with up and down ticks */
 	idle_ticks *= 100;
@@ -259,8 +284,7 @@ static void dbs_check_cpu(int cpu)
 		sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate);
 
 	if (idle_ticks < up_idle_ticks) {
-		__cpufreq_driver_target(this_dbs_info->cur_policy,
-			this_dbs_info->cur_policy->max,
+		__cpufreq_driver_target(policy, policy->max,
 			CPUFREQ_RELATION_H);
 		down_skip[cpu] = 0;
 		this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -272,12 +296,34 @@ static void dbs_check_cpu(int cpu)
 	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
 		return;
 
+	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
+		kstat_cpu(cpu).cpustat.iowait;
 	idle_ticks = total_idle_ticks -
 		this_dbs_info->prev_cpu_idle_down;
+	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
+
+	for_each_cpu_mask(j, policy->cpus) {
+		unsigned int tmp_idle_ticks;
+		struct cpu_dbs_info_s *j_dbs_info;
+
+		if (j == cpu)
+			continue;
+
+		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+
+		/* Check for frequency decrease */
+		total_idle_ticks = kstat_cpu(j).cpustat.idle +
+			kstat_cpu(j).cpustat.iowait;
+		tmp_idle_ticks = total_idle_ticks -
+			j_dbs_info->prev_cpu_idle_down;
+		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
+
+		if (tmp_idle_ticks < idle_ticks)
+			idle_ticks = tmp_idle_ticks;
+	}
 
 	/* Scale idle ticks by 100 and compare with up and down ticks */
 	idle_ticks *= 100;
 	down_skip[cpu] = 0;
-	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
 
 	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
 		dbs_tuners_ins.sampling_down_factor;
@@ -285,14 +331,14 @@ static void dbs_check_cpu(int cpu)
 		sampling_rate_in_HZ(freq_down_sampling_rate);
 
 	if (idle_ticks > down_idle_ticks) {
-		freq_down_step = (5 * this_dbs_info->cur_policy->max) / 100;
+		freq_down_step = (5 * policy->max) / 100;
 
 		/* max freq cannot be less than 100. But who knows.... */
 		if (unlikely(freq_down_step == 0))
 			freq_down_step = 5;
 
-		__cpufreq_driver_target(this_dbs_info->cur_policy,
-			this_dbs_info->cur_policy->cur - freq_down_step,
+		__cpufreq_driver_target(policy,
+			policy->cur - freq_down_step,
 			CPUFREQ_RELATION_H);
 		return;
 	}
@@ -328,6 +374,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 {
 	unsigned int cpu = policy->cpu;
 	struct cpu_dbs_info_s *this_dbs_info;
+	unsigned int j;
 
 	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 
@@ -344,14 +391,18 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			break;
 
 		down(&dbs_sem);
-		this_dbs_info->cur_policy = policy;
-
-		this_dbs_info->prev_cpu_idle_up =
-			kstat_cpu(cpu).cpustat.idle +
-			kstat_cpu(cpu).cpustat.iowait;
-		this_dbs_info->prev_cpu_idle_down =
-			kstat_cpu(cpu).cpustat.idle +
-			kstat_cpu(cpu).cpustat.iowait;
+		for_each_cpu_mask(j, policy->cpus) {
+			struct cpu_dbs_info_s *j_dbs_info;
+
+			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info->cur_policy = policy;
+
+			j_dbs_info->prev_cpu_idle_up =
+				kstat_cpu(j).cpustat.idle +
+				kstat_cpu(j).cpustat.iowait;
+			j_dbs_info->prev_cpu_idle_down =
+				kstat_cpu(j).cpustat.idle +
+				kstat_cpu(j).cpustat.iowait;
+		}
 		this_dbs_info->enable = 1;
 		sysfs_create_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable++;
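
A note on the START path in the last hunk: each CPU's prev_cpu_idle_up/down is seeded with its current idle+iowait total, so the first sampling pass measures a delta over one period rather than over the CPU's entire uptime. A minimal user-space sketch of that bookkeeping (an illustration, not kernel code; tick totals are hypothetical):

#include <stdio.h>

static unsigned int prev_cpu_idle_up;

/* One sampling pass: compute the delta since the previous pass, then
 * roll the bookkeeping forward, as the patch does per CPU. */
static unsigned int sample(unsigned int total_idle_ticks)
{
	unsigned int idle_ticks = total_idle_ticks - prev_cpu_idle_up;

	prev_cpu_idle_up = total_idle_ticks;
	return idle_ticks;
}

int main(void)
{
	/* Hypothetical cumulative idle+iowait totals per pass. */
	unsigned int totals[] = { 1450, 1500, 1910 };
	int i;

	prev_cpu_idle_up = 1000;	/* seeded at governor START */

	for (i = 0; i < 3; i++)
		printf("pass %d: idle delta = %u ticks\n",
		       i, sample(totals[i]));
	return 0;
}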