Commit f5c8cf2a authored by Rafael J. Wysocki

cpufreq: intel_pstate: hybrid: Use known scaling factor for P-cores

Commit 46573fd6 ("cpufreq: intel_pstate: hybrid: Rework HWP
calibration") attempted to use the information from CPPC (the nominal
performance in particular) to obtain the scaling factor allowing the
frequency to be computed if the HWP performance level of the given CPU
is known or vice versa.
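
In short, that calibration scaled the default "core" factor (100000, i.e.
100 MHz per performance level) by the ratio of a reference nominal
performance (the lowest one found across CPUs) to the given CPU's CPPC
nominal performance. A minimal user-space sketch of that arithmetic, with
made-up CPPC values chosen only for illustration (the helper below is not
the driver's API):

  #include <stdio.h>

  /* Default "core" scaling factor: 100000 kHz (100 MHz) per perf level. */
  #define CORE_SCALING	100000

  /*
   * Sketch of the CPPC-based derivation being removed: scale the default
   * factor by the ratio of a reference nominal performance (the lowest one
   * across CPUs) to this CPU's own CPPC nominal performance, rounding up
   * as the driver did with DIV_ROUND_UP().
   */
  static unsigned int cppc_based_scaling(unsigned int ref_nominal_perf,
  					 unsigned int cpu_nominal_perf)
  {
  	return (CORE_SCALING * ref_nominal_perf + cpu_nominal_perf - 1) /
  		cpu_nominal_perf;
  }

  int main(void)
  {
  	/* Made-up CPPC nominal performance values, for illustration only. */
  	printf("E-core scaling: %u\n", cppc_based_scaling(24, 24)); /* 100000 */
  	printf("P-core scaling: %u\n", cppc_based_scaling(24, 31)); /* 77420 */
  	return 0;
  }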

However, it turns out that on some platforms this doesn't work, because
the CPPC information on them does not align with the contents of the
MSR_HWP_CAPABILITIES registers.

This basically means that the only way to make intel_pstate work on all
of the hybrid platforms to date is to use the observation that on all
of them the scaling factor between the HWP performance levels and
frequency for P-cores is 78741 (approximately 100000/1.27).  For
E-cores it is 100000, which is the same as for all of the non-hybrid
"core" platforms and does not require any changes.

Accordingly, make intel_pstate use 78741 as the scaling factor between
HWP performance levels and frequency for P-cores on all hybrid platforms
and drop the dependency of the HWP calibration code on CPPC.
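
For reference, the hybrid_get_cpu_scaling() added below distinguishes the
core types via get_this_hybrid_cpu_type(), which reads CPUID leaf 0x1A
(core type in EAX[31:24]: 0x40 for a P-core, 0x20 for an E-core). A rough
user-space sketch of that classification for the calling CPU is shown
here; it is only an illustration and, unlike the driver, it does not
direct the query at a specific CPU the way smp_call_function_single()
does:

  #include <stdio.h>
  #include <cpuid.h>

  /* CPUID.1AH:EAX[31:24] is the hybrid core type on Intel hybrid parts. */
  #define CORE_TYPE_ATOM	0x20	/* E-core */
  #define CORE_TYPE_CORE	0x40	/* P-core */

  int main(void)
  {
  	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

  	if (!__get_cpuid_count(0x1a, 0, &eax, &ebx, &ecx, &edx)) {
  		fprintf(stderr, "CPUID leaf 0x1A not available\n");
  		return 1;
  	}

  	switch (eax >> 24) {
  	case CORE_TYPE_CORE:
  		printf("P-core: HWP scaling factor 78741\n");
  		break;
  	case CORE_TYPE_ATOM:
  		printf("E-core: HWP scaling factor 100000\n");
  		break;
  	default:
  		printf("Unrecognized core type 0x%x\n", eax >> 24);
  		break;
  	}
  	return 0;
  }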

Fixes: 46573fd6 ("cpufreq: intel_pstate: hybrid: Rework HWP calibration")
Reported-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Tested-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: 5.15+ <stable@vger.kernel.org> # 5.15+
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 8dbab94d
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -27,6 +27,7 @@
 #include <linux/pm_qos.h>
 #include <trace/events/power.h>
 
+#include <asm/cpu.h>
 #include <asm/div64.h>
 #include <asm/msr.h>
 #include <asm/cpu_device_id.h>
@@ -398,16 +399,6 @@ static int intel_pstate_get_cppc_guaranteed(int cpu)
 	return cppc_perf.nominal_perf;
 }
 
-static u32 intel_pstate_cppc_nominal(int cpu)
-{
-	u64 nominal_perf;
-
-	if (cppc_get_nominal_perf(cpu, &nominal_perf))
-		return 0;
-
-	return nominal_perf;
-}
-
 #else /* CONFIG_ACPI_CPPC_LIB */
 static inline void intel_pstate_set_itmt_prio(int cpu)
 {
@@ -532,34 +523,17 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
 	int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
 	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
 	int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
-	int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;
 	int scaling = cpu->pstate.scaling;
 
 	pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
-	pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, pstate_funcs.get_max(cpu->cpu));
 	pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
 	pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
 	pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
 	pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
 	pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
 
-	/*
-	 * If the product of the HWP performance scaling factor and the HWP_CAP
-	 * highest performance is greater than the maximum turbo frequency
-	 * corresponding to the pstate_funcs.get_turbo() return value, the
-	 * scaling factor is too high, so recompute it to make the HWP_CAP
-	 * highest performance correspond to the maximum turbo frequency.
-	 */
-	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
-	if (turbo_freq < cpu->pstate.turbo_freq) {
-		cpu->pstate.turbo_freq = turbo_freq;
-		scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
-		cpu->pstate.scaling = scaling;
-
-		pr_debug("CPU%d: refined HWP-to-frequency scaling factor: %d\n",
-			 cpu->cpu, scaling);
-	}
+	cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
+					   perf_ctl_scaling);
 
 	cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
 					 perf_ctl_scaling);
@@ -1965,37 +1939,24 @@ static int knl_get_turbo_pstate(int cpu)
 	return ret;
 }
 
-#ifdef CONFIG_ACPI_CPPC_LIB
-static u32 hybrid_ref_perf;
-
-static int hybrid_get_cpu_scaling(int cpu)
+static void hybrid_get_type(void *data)
 {
-	return DIV_ROUND_UP(core_get_scaling() * hybrid_ref_perf,
-			    intel_pstate_cppc_nominal(cpu));
+	u8 *cpu_type = data;
+
+	*cpu_type = get_this_hybrid_cpu_type();
 }
 
-static void intel_pstate_cppc_set_cpu_scaling(void)
+static int hybrid_get_cpu_scaling(int cpu)
 {
-	u32 min_nominal_perf = U32_MAX;
-	int cpu;
+	u8 cpu_type = 0;
 
-	for_each_present_cpu(cpu) {
-		u32 nominal_perf = intel_pstate_cppc_nominal(cpu);
+	smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
+	/* P-cores have a smaller perf level-to-frequency scaling factor. */
+	if (cpu_type == 0x40)
+		return 78741;
 
-		if (nominal_perf && nominal_perf < min_nominal_perf)
-			min_nominal_perf = nominal_perf;
-	}
-
-	if (min_nominal_perf < U32_MAX) {
-		hybrid_ref_perf = min_nominal_perf;
-		pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling;
-	}
-}
-#else
-static inline void intel_pstate_cppc_set_cpu_scaling(void)
-{
-}
-#endif /* CONFIG_ACPI_CPPC_LIB */
+	return core_get_scaling();
+}
 
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 {
@@ -3450,7 +3411,7 @@ static int __init intel_pstate_init(void)
 			default_driver = &intel_pstate;
 
 		if (boot_cpu_has(X86_FEATURE_HYBRID_CPU))
-			intel_pstate_cppc_set_cpu_scaling();
+			pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling;
 
 		goto hwp_cpu_matched;
 	}