Commit 8843f405 authored by Linus Torvalds

Merge tag 'pm-5.10-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
 "These fix a few issues related to running intel_pstate in the passive
  mode with HWP enabled, correct the handling of the max_cstate module
  parameter in intel_idle and make a few janitorial changes.

  Specifics:

   - Modify Kconfig to prevent configuring either the "conservative" or
     the "ondemand" governor as the default cpufreq governor if
     intel_pstate is selected, in which case "schedutil" is the default
     choice for the default governor setting (Rafael Wysocki).

   - Modify the cpufreq core, intel_pstate and the schedutil governor to
     avoid missing updates of the HWP max limit when intel_pstate
     operates in the passive mode with HWP enabled (Rafael Wysocki); a
     condensed sketch of the new mechanism follows this list.

   - Fix max_cstate module parameter handling in intel_idle for
     processor models with C-state tables coming from ACPI (Chen Yu).

   - Clean up assorted pieces of power management code (Jackie Zamow,
     Tom Rix, Zhang Qilong)"
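
The first two items above revolve around one mechanism: a new driver
flag, CPUFREQ_NEED_UPDATE_LIMITS, plus a helper for testing it from
outside the cpufreq core. Below is a condensed sketch of how the pieces
fit together; the flag, the helper and the intel_cpufreq assignment are
taken from the diffs that follow, while can_skip_update() is an
illustrative stand-in for the real governor fast path, not verbatim
kernel code:

/*
 * A driver that folds the policy min/max limits into the same hardware
 * write as the target frequency opts in at registration time:
 *
 *	intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
 *
 * The core and governors then stop short-circuiting on an unchanged
 * frequency whenever the flag is set:
 */
static bool can_skip_update(struct sugov_policy *sg_policy,
			    unsigned int next_freq)
{
	return sg_policy->next_freq == next_freq &&
	       !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
}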

* tag 'pm-5.10-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq: schedutil: Always call driver if CPUFREQ_NEED_UPDATE_LIMITS is set
  cpufreq: Introduce cpufreq_driver_test_flags()
  cpufreq: speedstep: remove unneeded semicolon
  PM: sleep: fix typo in kernel/power/process.c
  intel_idle: Fix max_cstate for processor models without C-state tables
  cpufreq: intel_pstate: Avoid missing HWP max updates in passive mode
  cpufreq: Introduce CPUFREQ_NEED_UPDATE_LIMITS driver flag
  cpufreq: Avoid configuring old governors as default with intel_pstate
  cpufreq: e_powersaver: remove unreachable break
parents 88098fd6 dea47cf4

drivers/cpufreq/Kconfig
@@ -71,6 +71,7 @@ config CPU_FREQ_DEFAULT_GOV_USERSPACE
 config CPU_FREQ_DEFAULT_GOV_ONDEMAND
 	bool "ondemand"
+	depends on !(X86_INTEL_PSTATE && SMP)
 	select CPU_FREQ_GOV_ONDEMAND
 	select CPU_FREQ_GOV_PERFORMANCE
 	help
@@ -83,6 +84,7 @@ config CPU_FREQ_DEFAULT_GOV_ONDEMAND
 config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
 	bool "conservative"
+	depends on !(X86_INTEL_PSTATE && SMP)
 	select CPU_FREQ_GOV_CONSERVATIVE
 	select CPU_FREQ_GOV_PERFORMANCE
 	help

drivers/cpufreq/cpufreq.c
@@ -1907,6 +1907,18 @@ void cpufreq_resume(void)
 	}
 }
 
+/**
+ * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
+ * @flags: Flags to test against the current cpufreq driver's flags.
+ *
+ * Assumes that the driver is there, so callers must ensure that this is the
+ * case.
+ */
+bool cpufreq_driver_test_flags(u16 flags)
+{
+	return !!(cpufreq_driver->flags & flags);
+}
+
 /**
  * cpufreq_get_current_driver - return current driver's name
  *
@@ -2187,7 +2199,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
-	if (target_freq == policy->cur)
+	if (target_freq == policy->cur &&
+	    !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
 		return 0;
 
 	/* Save last value to restore later on errors */
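
A note on why the helper above exists at all: the cpufreq_driver
pointer is static to cpufreq.c, so code in other files, such as the
schedutil governor, cannot test driver flags directly. A sketch of the
intended call-site pattern; the wrapper name is illustrative, only
cpufreq_driver_test_flags() and the flag itself come from this merge:

/* Illustrative wrapper: per the kerneldoc above, this must only run
 * while a cpufreq driver is registered. */
static inline bool driver_needs_limit_updates(void)
{
	return cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
}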

drivers/cpufreq/e_powersaver.c
@@ -223,7 +223,6 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
 	case EPS_BRAND_C3:
 		pr_cont("C3\n");
 		return -ENODEV;
-		break;
 	}
 	/* Enable Enhanced PowerSaver */
 	rdmsrl(MSR_IA32_MISC_ENABLE, val);

drivers/cpufreq/intel_pstate.c
@@ -2568,14 +2568,12 @@ static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate,
 	int old_pstate = cpu->pstate.current_pstate;
 
 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
-	if (target_pstate != old_pstate) {
+	if (hwp_active) {
+		intel_cpufreq_adjust_hwp(cpu, target_pstate, fast_switch);
+		cpu->pstate.current_pstate = target_pstate;
+	} else if (target_pstate != old_pstate) {
+		intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
 		cpu->pstate.current_pstate = target_pstate;
-		if (hwp_active)
-			intel_cpufreq_adjust_hwp(cpu, target_pstate,
-						 fast_switch);
-		else
-			intel_cpufreq_adjust_perf_ctl(cpu, target_pstate,
-						      fast_switch);
 	}
 
 	intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
@@ -3032,6 +3030,7 @@ static int __init intel_pstate_init(void)
 			hwp_mode_bdw = id->driver_data;
 			intel_pstate.attr = hwp_cpufreq_attrs;
 			intel_cpufreq.attr = hwp_cpufreq_attrs;
+			intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
 
 			if (!default_driver)
 				default_driver = &intel_pstate;
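
Why the HWP branch no longer tests target_pstate != old_pstate: the HWP
min and max limits are carried in the same MSR_HWP_REQUEST write as the
target, so skipping the call for an unchanged target would also skip a
pending limits update. A rough, abbreviated sketch of the helper's
shape; the real body lives in intel_pstate.c, and max-perf handling is
elided here:

/* Abbreviated sketch, not the verbatim kernel helper. */
static void adjust_hwp_sketch(struct cpudata *cpu, u32 target_pstate,
			      bool fast_switch)
{
	u64 value = READ_ONCE(cpu->hwp_req_cached);

	/* Fold the new target into the cached HWP request... */
	value &= ~HWP_MIN_PERF(~0L);
	value |= HWP_MIN_PERF(target_pstate);
	/* ...and write back the whole register, limits included. */
	WRITE_ONCE(cpu->hwp_req_cached, value);
	if (fast_switch)
		wrmsrl(MSR_HWP_REQUEST, value);
	else
		wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}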

drivers/cpufreq/longhaul.c
@@ -593,7 +593,6 @@ static void longhaul_setup_voltagescaling(void)
 		break;
 	default:
 		return;
-		break;
 	}
 	if (min_vid_speed >= highest_speed)
 		return;

drivers/cpufreq/speedstep-lib.c
@@ -240,7 +240,7 @@ unsigned int speedstep_get_frequency(enum speedstep_processor processor)
 		return pentium3_get_frequency(processor);
 	default:
 		return 0;
-	};
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(speedstep_get_frequency);

drivers/idle/intel_idle.c
@@ -1239,7 +1239,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
 		struct acpi_processor_cx *cx;
 		struct cpuidle_state *state;
 
-		if (intel_idle_max_cstate_reached(cstate))
+		if (intel_idle_max_cstate_reached(cstate - 1))
 			break;
 
 		cx = &acpi_state_table.states[cstate];
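
The off-by-one, worked through: in the ACPI path the table index cstate
starts at 1 (entry 0 of acpi_state_table is unused), while
intel_idle_max_cstate_reached() compares its argument plus one against
max_cstate. An illustration under that assumed helper shape:

/*
 * Assumed from intel_idle.c:
 *   intel_idle_max_cstate_reached(n) == (n + 1 > max_cstate)
 *
 * With max_cstate=1 and the ACPI loop running cstate = 1, 2, ...:
 *   before: reached(1) -> 2 > 1 -> true, so not even C1 was enabled;
 *   after:  reached(0) -> 1 > 1 -> false, so C1 is registered, and
 *           reached(1) on the next pass stops the loop, giving exactly
 *           one C-state, as max_cstate=1 promises.
 */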

include/linux/cpufreq.h
@@ -298,7 +298,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
 
 struct cpufreq_driver {
 	char name[CPUFREQ_NAME_LEN];
-	u8 flags;
+	u16 flags;
 	void *driver_data;
 
 	/* needed by all drivers */
@@ -422,9 +422,18 @@ struct cpufreq_driver {
  */
 #define CPUFREQ_IS_COOLING_DEV BIT(7)
 
+/*
+ * Set by drivers that need to update internal upper and lower boundaries along
+ * with the target frequency and so the core and governors should also invoke
+ * the driver if the target frequency does not change, but the policy min or
+ * max may have changed.
+ */
+#define CPUFREQ_NEED_UPDATE_LIMITS BIT(8)
+
 int cpufreq_register_driver(struct cpufreq_driver *driver_data);
 int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
+bool cpufreq_driver_test_flags(u16 flags);
 const char *cpufreq_get_current_driver(void);
 void *cpufreq_get_driver_data(void);
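
For driver authors, opting in is a one-line flag at registration time,
exactly as the intel_pstate hunk above does for intel_cpufreq. A
hypothetical driver for illustration; every name here other than the
flag and the struct cpufreq_driver fields is made up:

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* Re-program the hardware limits and the requested frequency. */
	return 0;
}

static struct cpufreq_driver example_driver = {
	.name		= "example",
	.flags		= CPUFREQ_NEED_UPDATE_LIMITS,
	/* ->target_index() now runs even when the requested frequency
	 * equals policy->cur, so it can re-apply policy->min/max. */
	.target_index	= example_target_index,
};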

kernel/power/process.c
@@ -146,7 +146,7 @@ int freeze_processes(void)
 	BUG_ON(in_atomic());
 
 	/*
-	 * Now that the whole userspace is frozen we need to disbale
+	 * Now that the whole userspace is frozen we need to disable
	 * the OOM killer to disallow any further interference with
	 * killable tasks. There is no guarantee oom victims will
	 * ever reach a point they go away we have to wait with a timeout.

kernel/sched/cpufreq_schedutil.c
@@ -102,7 +102,8 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
 {
-	if (sg_policy->next_freq == next_freq)
+	if (sg_policy->next_freq == next_freq &&
+	    !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
 		return false;
 
 	sg_policy->next_freq = next_freq;
@@ -161,7 +162,8 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 	freq = map_util_freq(util, freq, max);
 
-	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
+	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update &&
+	    !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
 		return sg_policy->next_freq;
 
 	sg_policy->need_freq_update = false;