Commit e75135e6 authored by Rafael J. Wysocki

Merge back cpufreq material for v5.2.

parents 4ab52646 108ec36b
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -181,7 +181,7 @@ void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
 			acpi_processor_ppc_ost(pr->handle, 0);
 	}
 	if (ret >= 0)
-		cpufreq_update_policy(pr->id);
+		cpufreq_update_limits(pr->id);
 }
 
 int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -124,7 +124,7 @@ static int __init amd_freq_sensitivity_init(void)
 			PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
 
 	if (!pcidev) {
-		if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+		if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
 			return -ENODEV;
 	}
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -34,11 +34,6 @@
 static LIST_HEAD(cpufreq_policy_list);
 
-static inline bool policy_is_inactive(struct cpufreq_policy *policy)
-{
-	return cpumask_empty(policy->cpus);
-}
-
 /* Macros to iterate over CPU policies */
 #define for_each_suitable_policy(__policy, __active)			 \
 	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
@@ -250,6 +245,51 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
 
+/**
+ * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
+ * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
+ */
+void cpufreq_cpu_release(struct cpufreq_policy *policy)
+{
+	if (WARN_ON(!policy))
+		return;
+
+	lockdep_assert_held(&policy->rwsem);
+
+	up_write(&policy->rwsem);
+
+	cpufreq_cpu_put(policy);
+}
+
+/**
+ * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
+ * @cpu: CPU to find the policy for.
+ *
+ * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
+ * if the policy returned by it is not NULL, acquire its rwsem for writing.
+ * Return the policy if it is active or release it and return NULL otherwise.
+ *
+ * The policy returned by this function has to be released with the help of
+ * cpufreq_cpu_release() in order to release its rwsem and balance its usage
+ * counter properly.
+ */
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+	if (!policy)
+		return NULL;
+
+	down_write(&policy->rwsem);
+
+	if (policy_is_inactive(policy)) {
+		cpufreq_cpu_release(policy);
+		return NULL;
+	}
+
+	return policy;
+}
+
 /*********************************************************************
  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/
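The hunk above introduces an acquire/release pair that gives callers a one-call way to obtain a write-locked, refcounted, active policy. As a rough illustration of the intended calling pattern (a sketch, not part of the commit; the function name is hypothetical):

/* Sketch only: a hypothetical caller of the new helpers. */
static void example_adjust_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return;	/* No policy, or the policy is inactive. */

	/*
	 * Here policy->rwsem is held for writing and the usage counter
	 * is elevated, so the policy cannot go away under us.
	 */

	cpufreq_cpu_release(policy);	/* Drops the rwsem, then the refcount. */
}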
@@ -669,9 +709,6 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
 	return ret;
 }
 
-static int cpufreq_set_policy(struct cpufreq_policy *policy,
-			      struct cpufreq_policy *new_policy);
-
 /**
  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
  */
@@ -2229,8 +2266,8 @@ EXPORT_SYMBOL(cpufreq_get_policy);
  *
  * The cpuinfo part of @policy is not updated by this function.
  */
-static int cpufreq_set_policy(struct cpufreq_policy *policy,
-			      struct cpufreq_policy *new_policy)
+int cpufreq_set_policy(struct cpufreq_policy *policy,
+		       struct cpufreq_policy *new_policy)
 {
 	struct cpufreq_governor *old_gov;
 	int ret;
@@ -2337,17 +2374,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
  */
 void cpufreq_update_policy(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
 	struct cpufreq_policy new_policy;
 
 	if (!policy)
 		return;
 
-	down_write(&policy->rwsem);
-
-	if (policy_is_inactive(policy))
-		goto unlock;
-
 	/*
 	 * BIOS might change freq behind our back
 	 * -> ask driver for current freq and notify governors about a change
@@ -2364,12 +2396,26 @@ void cpufreq_update_policy(unsigned int cpu)
 	cpufreq_set_policy(policy, &new_policy);
 
 unlock:
-	up_write(&policy->rwsem);
-	cpufreq_cpu_put(policy);
+	cpufreq_cpu_release(policy);
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
 
+/**
+ * cpufreq_update_limits - Update policy limits for a given CPU.
+ * @cpu: CPU to update the policy limits for.
+ *
+ * Invoke the driver's ->update_limits callback if present or call
+ * cpufreq_update_policy() for @cpu.
+ */
+void cpufreq_update_limits(unsigned int cpu)
+{
+	if (cpufreq_driver->update_limits)
+		cpufreq_driver->update_limits(cpu);
+	else
+		cpufreq_update_policy(cpu);
+}
+EXPORT_SYMBOL_GPL(cpufreq_update_limits);
+
 /*********************************************************************
  *                              BOOST                                *
 *********************************************************************/
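cpufreq_update_limits() is the new entry point that firmware-notification paths (such as the ACPI _PPC handler changed in the first hunk) call instead of cpufreq_update_policy(). A driver that can refresh its limits cheaply opts in by filling in ->update_limits; all other drivers fall back to a full policy update. A minimal sketch of a driver opting in (the driver name and callback body are illustrative, not from this commit):

/* Sketch only: a hypothetical driver wiring up ->update_limits. */
static void example_update_limits(unsigned int cpu)
{
	/*
	 * Re-read whatever firmware-imposed limit applies to @cpu and
	 * clamp subsequent frequency requests to it, without running
	 * the full cpufreq_set_policy() machinery.
	 */
}

static struct cpufreq_driver example_driver = {
	.name		= "example",
	.update_limits	= example_update_limits,
	/* .init, .verify and the target callbacks are omitted here. */
};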
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -179,6 +179,7 @@ struct vid_data {
  *			based on the MSR_IA32_MISC_ENABLE value and whether or
  *			not the maximum reported turbo P-state is different from
  *			the maximum reported non-turbo one.
+ * @turbo_disabled_mf:	The @turbo_disabled value reflected by cpuinfo.max_freq.
  * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
  *			P-state capacity.
  * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
@@ -187,6 +188,7 @@ struct vid_data {
 struct global_params {
 	bool no_turbo;
 	bool turbo_disabled;
+	bool turbo_disabled_mf;
 	int max_perf_pct;
 	int min_perf_pct;
 };
@@ -525,7 +527,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
 	u64 epb;
 	int ret;
 
-	if (!static_cpu_has(X86_FEATURE_EPB))
+	if (!boot_cpu_has(X86_FEATURE_EPB))
 		return -ENXIO;
 
 	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
...@@ -539,7 +541,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data) ...@@ -539,7 +541,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{ {
s16 epp; s16 epp;
if (static_cpu_has(X86_FEATURE_HWP_EPP)) { if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/* /*
* When hwp_req_data is 0, means that caller didn't read * When hwp_req_data is 0, means that caller didn't read
* MSR_HWP_REQUEST, so need to read and get EPP. * MSR_HWP_REQUEST, so need to read and get EPP.
@@ -564,7 +566,7 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
 	u64 epb;
 	int ret;
 
-	if (!static_cpu_has(X86_FEATURE_EPB))
+	if (!boot_cpu_has(X86_FEATURE_EPB))
 		return -ENXIO;
 
 	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -612,7 +614,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
 	if (epp < 0)
 		return epp;
 
-	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
 		if (epp == HWP_EPP_PERFORMANCE)
 			return 1;
 		if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
@@ -621,7 +623,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
 			return 3;
 		else
 			return 4;
-	} else if (static_cpu_has(X86_FEATURE_EPB)) {
+	} else if (boot_cpu_has(X86_FEATURE_EPB)) {
 		/*
 		 * Range:
 		 *	0x00-0x03	:	Performance
@@ -649,7 +651,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
 
 	mutex_lock(&intel_pstate_limits_lock);
 
-	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
 		u64 value;
 
 		ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
@@ -824,7 +826,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
 			epp = cpu_data->epp_powersave;
 	}
 update_epp:
-	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+	if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
 		value &= ~GENMASK_ULL(31, 24);
 		value |= (u64)epp << 24;
 	} else {
@@ -849,7 +851,7 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
 	value |= HWP_MIN_PERF(min_perf);
 
 	/* Set EPP/EPB to min */
-	if (static_cpu_has(X86_FEATURE_HWP_EPP))
+	if (boot_cpu_has(X86_FEATURE_HWP_EPP))
 		value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
 	else
 		intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
@@ -897,6 +899,48 @@ static void intel_pstate_update_policies(void)
 		cpufreq_update_policy(cpu);
 }
 
+static void intel_pstate_update_max_freq(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+	struct cpufreq_policy new_policy;
+	struct cpudata *cpudata;
+
+	if (!policy)
+		return;
+
+	cpudata = all_cpu_data[cpu];
+	policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+			cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
+	memcpy(&new_policy, policy, sizeof(*policy));
+	new_policy.max = min(policy->user_policy.max, policy->cpuinfo.max_freq);
+	new_policy.min = min(policy->user_policy.min, new_policy.max);
+
+	cpufreq_set_policy(policy, &new_policy);
+
+	cpufreq_cpu_release(policy);
+}
+
+static void intel_pstate_update_limits(unsigned int cpu)
+{
+	mutex_lock(&intel_pstate_driver_lock);
+
+	update_turbo_state();
+	/*
+	 * If turbo has been turned on or off globally, policy limits for
+	 * all CPUs need to be updated to reflect that.
+	 */
+	if (global.turbo_disabled_mf != global.turbo_disabled) {
+		global.turbo_disabled_mf = global.turbo_disabled;
+		for_each_possible_cpu(cpu)
+			intel_pstate_update_max_freq(cpu);
+	} else {
+		cpufreq_update_policy(cpu);
+	}
+
+	mutex_unlock(&intel_pstate_driver_lock);
+}
+
 /************************** sysfs begin ************************/
 #define show_one(file_name, object)					\
 	static ssize_t show_##file_name					\
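The new turbo_disabled_mf field records which turbo state was last folded into cpuinfo.max_freq ("mf"), so intel_pstate_update_limits() can detect a global turbo flip and recompute every policy's frequency ceiling. With made-up example numbers (a P-state scaling of 100000 kHz, max_pstate 24, turbo_pstate 32; real values come from the P-state MSRs), the standalone sketch below reproduces the ceiling selection intel_pstate_update_max_freq() makes:

#include <stdio.h>

int main(void)
{
	unsigned int scaling = 100000;          /* kHz per P-state step (illustrative) */
	unsigned int max_freq = 24 * scaling;   /* non-turbo ceiling: 2.4 GHz */
	unsigned int turbo_freq = 32 * scaling; /* turbo ceiling: 3.2 GHz */

	for (int turbo_disabled_mf = 0; turbo_disabled_mf <= 1; turbo_disabled_mf++) {
		/* Same selection as in intel_pstate_update_max_freq() above: */
		unsigned int cpuinfo_max = turbo_disabled_mf ? max_freq : turbo_freq;

		printf("turbo_disabled_mf=%d -> cpuinfo.max_freq=%u kHz\n",
		       turbo_disabled_mf, cpuinfo_max);
	}
	return 0;
}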
@@ -1197,7 +1241,7 @@ static void __init intel_pstate_sysfs_expose_params(void)
 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
 	/* First disable HWP notification interrupt as we don't process them */
-	if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
+	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
 		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
 
 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
@@ -2138,6 +2182,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 	/* cpuinfo and default policy values */
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	update_turbo_state();
+	global.turbo_disabled_mf = global.turbo_disabled;
 	policy->cpuinfo.max_freq = global.turbo_disabled ?
 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 	policy->cpuinfo.max_freq *= cpu->pstate.scaling;
@@ -2182,6 +2227,7 @@ static struct cpufreq_driver intel_pstate = {
 	.init		= intel_pstate_cpu_init,
 	.exit		= intel_pstate_cpu_exit,
 	.stop_cpu	= intel_pstate_stop_cpu,
+	.update_limits	= intel_pstate_update_limits,
 	.name		= "intel_pstate",
 };
@@ -2316,6 +2362,7 @@ static struct cpufreq_driver intel_cpufreq = {
 	.init		= intel_cpufreq_cpu_init,
 	.exit		= intel_pstate_cpu_exit,
 	.stop_cpu	= intel_cpufreq_stop_cpu,
+	.update_limits	= intel_pstate_update_limits,
 	.name		= "intel_cpufreq",
 };
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1178,7 +1178,7 @@ static int powernowk8_init(void)
 	unsigned int i, supported_cpus = 0;
 	int ret;
 
-	if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+	if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
 		__request_acpi_cpufreq();
 		return -ENODEV;
 	}
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -178,6 +178,11 @@ static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
 #endif
 
+static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+{
+	return cpumask_empty(policy->cpus);
+}
+
 static inline bool policy_is_shared(struct cpufreq_policy *policy)
 {
 	return cpumask_weight(policy->cpus) > 1;
@@ -193,8 +198,14 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu);
 void disable_cpufreq(void);
 
 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
+
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
+void cpufreq_cpu_release(struct cpufreq_policy *policy);
+
 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
+int cpufreq_set_policy(struct cpufreq_policy *policy,
+		       struct cpufreq_policy *new_policy);
 void cpufreq_update_policy(unsigned int cpu);
+void cpufreq_update_limits(unsigned int cpu);
 bool have_governor_per_policy(void);
 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
@@ -322,6 +333,9 @@ struct cpufreq_driver {
 	/* should be defined, if possible */
 	unsigned int	(*get)(unsigned int cpu);
 
+	/* Called to update policy limits on firmware notifications. */
+	void		(*update_limits)(unsigned int cpu);
+
 	/* optional */
 	int		(*bios_limit)(int cpu, unsigned int *limit);
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -13,6 +13,8 @@
 #include <linux/sched/cpufreq.h>
 #include <trace/events/power.h>
 
+#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)
+
 struct sugov_tunables {
 	struct gov_attr_set	attr_set;
 	unsigned int		rate_limit_us;
...@@ -51,7 +53,6 @@ struct sugov_cpu { ...@@ -51,7 +53,6 @@ struct sugov_cpu {
u64 last_update; u64 last_update;
unsigned long bw_dl; unsigned long bw_dl;
unsigned long min;
unsigned long max; unsigned long max;
/* The field below is for single-CPU policies only: */ /* The field below is for single-CPU policies only: */
@@ -291,8 +292,8 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
  *
  * The IO wait boost of a task is disabled after a tick since the last update
  * of a CPU. If a new IO wait boost is requested after more then a tick, then
- * we enable the boost starting from the minimum frequency, which improves
- * energy efficiency by ignoring sporadic wakeups from IO.
+ * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
+ * efficiency by ignoring sporadic wakeups from IO.
  */
 static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
 			       bool set_iowait_boost)
@@ -303,7 +304,7 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
 	if (delta_ns <= TICK_NSEC)
 		return false;
 
-	sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
+	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
 	sg_cpu->iowait_boost_pending = set_iowait_boost;
 
 	return true;
* *
* Each time a task wakes up after an IO operation, the CPU utilization can be * Each time a task wakes up after an IO operation, the CPU utilization can be
* boosted to a certain utilization which doubles at each "frequent and * boosted to a certain utilization which doubles at each "frequent and
* successive" wakeup from IO, ranging from the utilization of the minimum * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
* OPP to the utilization of the maximum OPP. * of the maximum OPP.
*
* To keep doubling, an IO boost has to be requested at least once per tick, * To keep doubling, an IO boost has to be requested at least once per tick,
* otherwise we restart from the utilization of the minimum OPP. * otherwise we restart from the utilization of the minimum OPP.
*/ */
@@ -349,7 +351,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
 	}
 
 	/* First wakeup after IO: start with minimum boost */
-	sg_cpu->iowait_boost = sg_cpu->min;
+	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
 }
 
 /**
@@ -389,7 +391,7 @@ static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
 		 * No boost pending; reduce the boost value.
 		 */
 		sg_cpu->iowait_boost >>= 1;
-		if (sg_cpu->iowait_boost < sg_cpu->min) {
+		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
 			sg_cpu->iowait_boost = 0;
 			return util;
 		}
@@ -826,9 +828,6 @@ static int sugov_start(struct cpufreq_policy *policy)
 		memset(sg_cpu, 0, sizeof(*sg_cpu));
 		sg_cpu->cpu			= cpu;
 		sg_cpu->sg_policy		= sg_policy;
-		sg_cpu->min			=
-			(SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
-			policy->cpuinfo.max_freq;
 	}
 
 	for_each_cpu(cpu, policy->cpus) {
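The schedutil hunks replace the per-policy sg_cpu->min floor with a fixed IOWAIT_BOOST_MIN of SCHED_CAPACITY_SCALE / 8 (128 on the usual 1024 scale). The comments above describe the resulting ramp: start at the floor on the first IO wakeup, double on each back-to-back wakeup up to the maximum capacity, then halve once boosting stops and cut to zero below the floor. A standalone userspace sketch of that ramp (illustrative only, not kernel code):

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024
#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)	/* 128 */

int main(void)
{
	unsigned long boost = IOWAIT_BOOST_MIN;
	unsigned long max = SCHED_CAPACITY_SCALE;

	/* Successive in-tick IO wakeups: the boost doubles up to the ceiling. */
	while (boost < max) {
		printf("ramp up: %lu\n", boost);
		boost = 2 * boost < max ? 2 * boost : max;
	}
	printf("ceiling: %lu\n", boost);

	/* No boost pending: halve each step, cutting off below the floor. */
	while (boost) {
		boost >>= 1;
		if (boost < IOWAIT_BOOST_MIN)
			boost = 0;
		printf("decay:   %lu\n", boost);
	}
	return 0;
}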