Commit 2addac72 authored by Rafael J. Wysocki

Merge schedutil governor updates for v4.12.

parents 2dee4b0e 1b72e7fd
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -41,6 +41,7 @@
 #define INTEL_PSTATE_HWP_SAMPLING_INTERVAL	(50 * NSEC_PER_MSEC)
 
 #define INTEL_CPUFREQ_TRANSITION_LATENCY	20000
+#define INTEL_CPUFREQ_TRANSITION_DELAY		500
 
 #ifdef CONFIG_ACPI
 #include <acpi/processor.h>
@@ -2237,6 +2238,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		return ret;
 
 	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
+	policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
 	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
 	policy->cur = policy->cpuinfo.min_freq;
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -120,6 +120,13 @@ struct cpufreq_policy {
 	bool			fast_switch_possible;
 	bool			fast_switch_enabled;
 
+	/*
+	 * Preferred average time interval between consecutive invocations of
+	 * the driver to set the frequency for this policy. To be set by the
+	 * scaling driver (0, which is the default, means no preference).
+	 */
+	unsigned int		transition_delay_us;
+
 	/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
 	unsigned int		cached_target_freq;
 	int			cached_resolved_idx;
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -117,6 +117,7 @@ extern void tick_nohz_idle_enter(void);
 extern void tick_nohz_idle_exit(void);
 extern void tick_nohz_irq_exit(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
+extern unsigned long tick_nohz_get_idle_calls(void);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 #else /* !CONFIG_NO_HZ_COMMON */
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -61,6 +61,11 @@ struct sugov_cpu {
 	unsigned long util;
 	unsigned long max;
 	unsigned int flags;
+
+	/* The field below is for single-CPU policies only. */
+#ifdef CONFIG_NO_HZ_COMMON
+	unsigned long saved_idle_calls;
+#endif
 };
 
 static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
@@ -93,22 +98,23 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 {
 	struct cpufreq_policy *policy = sg_policy->policy;
 
+	if (sg_policy->next_freq == next_freq)
+		return;
+
+	if (sg_policy->next_freq > next_freq)
+		next_freq = (sg_policy->next_freq + next_freq) >> 1;
+
+	sg_policy->next_freq = next_freq;
 	sg_policy->last_freq_update_time = time;
 
 	if (policy->fast_switch_enabled) {
-		if (sg_policy->next_freq == next_freq) {
-			trace_cpu_frequency(policy->cur, smp_processor_id());
-			return;
-		}
-		sg_policy->next_freq = next_freq;
 		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
 		if (next_freq == CPUFREQ_ENTRY_INVALID)
 			return;
 
 		policy->cur = next_freq;
 		trace_cpu_frequency(next_freq, smp_processor_id());
-	} else if (sg_policy->next_freq != next_freq) {
-		sg_policy->next_freq = next_freq;
+	} else {
 		sg_policy->work_in_progress = true;
 		irq_work_queue(&sg_policy->irq_work);
 	}
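Aside (not part of the commit): in the hunk above, an update that would lower the frequency is first averaged with the current target, so a drop happens in midpoint steps rather than all at once. A minimal user-space sketch of that behavior, with hypothetical frequencies in kHz:

#include <stdio.h>

/* Standalone model of the down-step averaging in sugov_update_commit():
 * a requested drop is replaced by the midpoint of the current target and
 * the request, so repeated low requests converge geometrically. */
static unsigned int commit_freq(unsigned int cur, unsigned int req)
{
	if (req < cur)
		req = (cur + req) >> 1;	/* midpoint, as in the hunk above */
	return req;
}

int main(void)
{
	unsigned int freq = 2000000;	/* hypothetical current target, kHz */

	/* Request the minimum repeatedly; each commit halves the distance:
	 * 1400000, 1100000, 950000, 875000, 837500 kHz. */
	for (int i = 0; i < 5; i++) {
		freq = commit_freq(freq, 800000);
		printf("step %d: %u kHz\n", i, freq);
	}
	return 0;
}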
@@ -192,6 +198,19 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
 		sg_cpu->iowait_boost >>= 1;
 }
 
+#ifdef CONFIG_NO_HZ_COMMON
+static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
+{
+	unsigned long idle_calls = tick_nohz_get_idle_calls();
+	bool ret = idle_calls == sg_cpu->saved_idle_calls;
+
+	sg_cpu->saved_idle_calls = idle_calls;
+	return ret;
+}
+#else
+static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+#endif /* CONFIG_NO_HZ_COMMON */
+
 static void sugov_update_single(struct update_util_data *hook, u64 time,
 				unsigned int flags)
 {
@@ -200,6 +219,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	struct cpufreq_policy *policy = sg_policy->policy;
 	unsigned long util, max;
 	unsigned int next_f;
+	bool busy;
 
 	sugov_set_iowait_boost(sg_cpu, time, flags);
 	sg_cpu->last_update = time;
@@ -207,40 +227,37 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	if (!sugov_should_update_freq(sg_policy, time))
 		return;
 
+	busy = sugov_cpu_is_busy(sg_cpu);
+
 	if (flags & SCHED_CPUFREQ_RT_DL) {
 		next_f = policy->cpuinfo.max_freq;
 	} else {
 		sugov_get_util(&util, &max);
 		sugov_iowait_boost(sg_cpu, &util, &max);
 		next_f = get_next_freq(sg_policy, util, max);
+		/*
+		 * Do not reduce the frequency if the CPU has not been idle
+		 * recently, as the reduction is likely to be premature then.
+		 */
+		if (busy && next_f < sg_policy->next_freq)
+			next_f = sg_policy->next_freq;
 	}
 	sugov_update_commit(sg_policy, time, next_f);
 }
 
-static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
-					   unsigned long util, unsigned long max,
-					   unsigned int flags)
+static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
 {
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
 	struct cpufreq_policy *policy = sg_policy->policy;
-	unsigned int max_f = policy->cpuinfo.max_freq;
 	u64 last_freq_update_time = sg_policy->last_freq_update_time;
+	unsigned long util = 0, max = 1;
 	unsigned int j;
 
-	if (flags & SCHED_CPUFREQ_RT_DL)
-		return max_f;
-
-	sugov_iowait_boost(sg_cpu, &util, &max);
-
 	for_each_cpu(j, policy->cpus) {
-		struct sugov_cpu *j_sg_cpu;
+		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
 		unsigned long j_util, j_max;
 		s64 delta_ns;
 
-		if (j == smp_processor_id())
-			continue;
-
-		j_sg_cpu = &per_cpu(sugov_cpu, j);
-
 		/*
 		 * If the CPU utilization was last updated before the previous
 		 * frequency update and the time elapsed between the last update
@@ -254,7 +271,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
 			continue;
 		}
 		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
-			return max_f;
+			return policy->cpuinfo.max_freq;
 
 		j_util = j_sg_cpu->util;
 		j_max = j_sg_cpu->max;
@@ -289,7 +306,11 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 	sg_cpu->last_update = time;
 
 	if (sugov_should_update_freq(sg_policy, time)) {
-		next_f = sugov_next_freq_shared(sg_cpu, util, max, flags);
+		if (flags & SCHED_CPUFREQ_RT_DL)
+			next_f = sg_policy->policy->cpuinfo.max_freq;
+		else
+			next_f = sugov_next_freq_shared(sg_cpu);
+
 		sugov_update_commit(sg_policy, time, next_f);
 	}
@@ -473,7 +494,6 @@ static int sugov_init(struct cpufreq_policy *policy)
 {
 	struct sugov_policy *sg_policy;
 	struct sugov_tunables *tunables;
-	unsigned int lat;
 	int ret = 0;
 
 	/* State should be equivalent to EXIT */
@@ -512,10 +532,16 @@ static int sugov_init(struct cpufreq_policy *policy)
 		goto stop_kthread;
 	}
 
-	tunables->rate_limit_us = LATENCY_MULTIPLIER;
-	lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
-	if (lat)
-		tunables->rate_limit_us *= lat;
+	if (policy->transition_delay_us) {
+		tunables->rate_limit_us = policy->transition_delay_us;
+	} else {
+		unsigned int lat;
+
+		tunables->rate_limit_us = LATENCY_MULTIPLIER;
+		lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+		if (lat)
+			tunables->rate_limit_us *= lat;
+	}
 
 	policy->governor_data = sg_policy;
 	sg_policy->tunables = tunables;
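Aside (not part of the commit): the sugov_init() hunk above takes the rate limit straight from the driver's transition_delay_us when it is set, and only falls back to LATENCY_MULTIPLIER scaled by the hardware transition latency otherwise. A standalone sketch of that selection, with the helper name and sample values illustrative (LATENCY_MULTIPLIER is 1000 in include/linux/cpufreq.h):

#include <stdio.h>

#define LATENCY_MULTIPLIER	1000	/* as in include/linux/cpufreq.h */
#define NSEC_PER_USEC		1000L

/* Standalone model of the rate_limit_us selection in sugov_init(). */
static unsigned int pick_rate_limit_us(unsigned int transition_delay_us,
				       unsigned int transition_latency_ns)
{
	unsigned int rate_limit_us, lat;

	if (transition_delay_us)	/* driver preference wins */
		return transition_delay_us;

	rate_limit_us = LATENCY_MULTIPLIER;
	lat = transition_latency_ns / NSEC_PER_USEC;
	if (lat)
		rate_limit_us *= lat;
	return rate_limit_us;
}

int main(void)
{
	/* intel_pstate in passive mode: delay 500 us, latency 20000 ns. */
	printf("intel_pstate: %u us\n", pick_rate_limit_us(500, 20000));
	/* No driver preference: 20 us latency -> 20000 us rate limit. */
	printf("fallback:     %u us\n", pick_rate_limit_us(0, 20000));
	return 0;
}

With intel_pstate's values, the fallback path would have produced a 20 ms rate limit, while the new transition_delay_us path yields 500 us, which is the point of wiring the delay through the policy.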
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -993,6 +993,18 @@ ktime_t tick_nohz_get_sleep_length(void)
 	return ts->sleep_length;
 }
 
+/**
+ * tick_nohz_get_idle_calls - return the current idle calls counter value
+ *
+ * Called from the schedutil frequency scaling governor in scheduler context.
+ */
+unsigned long tick_nohz_get_idle_calls(void)
+{
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
+
+	return ts->idle_calls;
+}
+
 static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
 {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
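Aside (not part of the commit): the counter exposed above feeds sugov_cpu_is_busy() in the schedutil hunks; if idle_calls has not advanced since the previous frequency update, the CPU never entered idle in between, so a frequency reduction is skipped. A minimal user-space model of that saved-counter comparison (type and field names illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Standalone model of sugov_cpu_is_busy(): compare the tick layer's
 * idle-entry counter against the value cached at the previous update;
 * an unchanged counter means no idle period occurred in between. */
struct cpu_state {
	unsigned long idle_calls;	/* stands in for ts->idle_calls */
	unsigned long saved_idle_calls;
};

static bool cpu_is_busy(struct cpu_state *cpu)
{
	bool busy = cpu->idle_calls == cpu->saved_idle_calls;

	cpu->saved_idle_calls = cpu->idle_calls;
	return busy;
}

int main(void)
{
	struct cpu_state cpu = { .idle_calls = 41, .saved_idle_calls = 40 };

	printf("%d\n", cpu_is_busy(&cpu));	/* 0: CPU idled since last check */
	printf("%d\n", cpu_is_busy(&cpu));	/* 1: counter unchanged, busy */
	return 0;
}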