Commit eabd22c6 authored by Rafael J. Wysocki

cpufreq: intel_pstate: Use different utilization update callbacks

Notice that some overhead in the utilization update callbacks
registered by intel_pstate in the active mode can be avoided if
those callbacks are tailored to specific configurations of the
driver.  For example, the utilization update callback for the HWP
enabled case only needs to update the average CPU performance
periodically whereas the utilization update callback for the
PID-based algorithm does not need to take IO-wait boosting into
account and so on.

With that in mind, define three utilization update callbacks for
three different use cases: HWP enabled, the CPU load "powersave"
P-state selection algorithm and the PID-based "powersave" P-state
selection algorithm and modify the driver initialization to
choose the callback matching its current configuration.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 0042b2c0
...@@ -37,6 +37,9 @@ ...@@ -37,6 +37,9 @@
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/intel-family.h> #include <asm/intel-family.h>
#define INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
#define INTEL_PSTATE_HWP_SAMPLING_INTERVAL (50 * NSEC_PER_MSEC)
#define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
...@@ -1676,7 +1679,11 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) ...@@ -1676,7 +1679,11 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
* that sample.time will always be reset before setting the utilization * that sample.time will always be reset before setting the utilization
* update hook and make the caller skip the sample then. * update hook and make the caller skip the sample then.
*/ */
return !!cpu->last_sample_time; if (cpu->last_sample_time) {
intel_pstate_calc_avg_perf(cpu);
return true;
}
return false;
} }
static inline int32_t get_avg_frequency(struct cpudata *cpu) static inline int32_t get_avg_frequency(struct cpudata *cpu)
...@@ -1783,7 +1790,7 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) ...@@ -1783,7 +1790,7 @@ static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate)); wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
} }
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) static void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{ {
int from, target_pstate; int from, target_pstate;
struct sample *sample; struct sample *sample;
...@@ -1811,36 +1818,56 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) ...@@ -1811,36 +1818,56 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
fp_toint(cpu->iowait_boost * 100)); fp_toint(cpu->iowait_boost * 100));
} }
/*
 * Utilization update callback used when HWP is enabled: the hardware picks
 * P-states on its own, so all we do here is refresh the performance sample
 * at a coarse interval so average-frequency reporting stays current.
 */
static void intel_pstate_update_util_hwp(struct update_util_data *data,
					 u64 time, unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	s64 elapsed = (s64)(time - cpu->sample.time);

	if (elapsed < INTEL_PSTATE_HWP_SAMPLING_INTERVAL)
		return;

	intel_pstate_sample(cpu, time);
}
/*
 * Utilization update callback for the PID-based "powersave" algorithm:
 * take a new sample once the PID sample period has elapsed and, if the
 * sample is valid, let the PID controller adjust the P-state.
 * No IO-wait boosting is needed for this algorithm.
 */
static void intel_pstate_update_util_pid(struct update_util_data *data,
					 u64 time, unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	s64 elapsed = (s64)(time - cpu->sample.time);

	if (elapsed >= pid_params.sample_rate_ns && intel_pstate_sample(cpu, time))
		intel_pstate_adjust_busy_pstate(cpu);
}
/*
 * Utilization update callback for the CPU-load ("powersave") algorithm.
 *
 * Handles IO-wait boosting: a SCHED_CPUFREQ_IOWAIT wakeup sets the boost,
 * and the boost is cleared again if more than a tick has passed since the
 * last update (i.e. the CPU may have been idle).  Once the default sampling
 * interval has elapsed, a new sample is taken and the P-state is adjusted.
 *
 * NOTE: the scraped diff rendered the old and new columns of this function
 * fused together; this is the reconstructed post-commit (new) version.
 */
static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns;

	if (flags & SCHED_CPUFREQ_IOWAIT) {
		cpu->iowait_boost = int_tofp(1);
	} else if (cpu->iowait_boost) {
		/* Clear iowait_boost if the CPU may have been idle. */
		delta_ns = time - cpu->last_update;
		if (delta_ns > TICK_NSEC)
			cpu->iowait_boost = 0;
	}
	cpu->last_update = time;

	delta_ns = time - cpu->sample.time;
	if ((s64)delta_ns < INTEL_PSTATE_DEFAULT_SAMPLING_INTERVAL)
		return;

	if (intel_pstate_sample(cpu, time))
		intel_pstate_adjust_busy_pstate(cpu);
}
/*
 * Utilization update callback to register in the active mode.
 * Defaults to the CPU-load ("powersave") variant; driver initialization
 * switches it to the PID variant (when the target-P-state function is
 * get_target_pstate_use_performance) or to the HWP variant (when HWP is
 * enabled) before the hook is registered.
 */
static void (*update_util_cb)(struct update_util_data *data, u64 time,
unsigned int flags) = intel_pstate_update_util;
#define ICPU(model, policy) \ #define ICPU(model, policy) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
(unsigned long)&policy } (unsigned long)&policy }
...@@ -1938,8 +1965,7 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num) ...@@ -1938,8 +1965,7 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
/* Prevent intel_pstate_update_util() from using stale data. */ /* Prevent intel_pstate_update_util() from using stale data. */
cpu->sample.time = 0; cpu->sample.time = 0;
cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, cpufreq_add_update_util_hook(cpu_num, &cpu->update_util, update_util_cb);
intel_pstate_update_util);
cpu->update_util_set = true; cpu->update_util_set = true;
} }
...@@ -2405,6 +2431,9 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs) ...@@ -2405,6 +2431,9 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
pstate_funcs.get_target_pstate = funcs->get_target_pstate; pstate_funcs.get_target_pstate = funcs->get_target_pstate;
intel_pstate_use_acpi_profile(); intel_pstate_use_acpi_profile();
if (pstate_funcs.get_target_pstate == get_target_pstate_use_performance)
update_util_cb = intel_pstate_update_util_pid;
} }
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
...@@ -2549,11 +2578,11 @@ static int __init intel_pstate_init(void) ...@@ -2549,11 +2578,11 @@ static int __init intel_pstate_init(void)
if (x86_match_cpu(hwp_support_ids)) { if (x86_match_cpu(hwp_support_ids)) {
copy_cpu_funcs(&core_params.funcs); copy_cpu_funcs(&core_params.funcs);
if (no_hwp) { if (no_hwp) {
pstate_funcs.get_target_pstate = get_target_pstate_use_cpu_load; update_util_cb = intel_pstate_update_util;
} else { } else {
hwp_active++; hwp_active++;
intel_pstate.attr = hwp_cpufreq_attrs; intel_pstate.attr = hwp_cpufreq_attrs;
pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC; update_util_cb = intel_pstate_update_util_hwp;
goto hwp_cpu_matched; goto hwp_cpu_matched;
} }
} else { } else {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment