Commit 96607113 authored by Rafael J. Wysocki

Merge back cpufreq material for v4.18.

parents 0cf442c6 144ec880
......@@ -23,6 +23,8 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include "cpufreq-dt.h"
/* Power management in North Bridge register set */
#define ARMADA_37XX_NB_L0L1 0x18
#define ARMADA_37XX_NB_L2L3 0x1C
......@@ -56,6 +58,16 @@
*/
#define LOAD_LEVEL_NR 4
struct armada37xx_cpufreq_state {
struct regmap *regmap;
u32 nb_l0l1;
u32 nb_l2l3;
u32 nb_dyn_mod;
u32 nb_cpu_load;
};
static struct armada37xx_cpufreq_state *armada37xx_cpufreq_state;
struct armada_37xx_dvfs {
u32 cpu_freq_max;
u8 divider[LOAD_LEVEL_NR];
......@@ -136,7 +148,7 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
clk_set_parent(clk, parent);
}
static void __init armada37xx_cpufreq_disable_dvfs(struct regmap *base)
static void armada37xx_cpufreq_disable_dvfs(struct regmap *base)
{
unsigned int reg = ARMADA_37XX_NB_DYN_MOD,
mask = ARMADA_37XX_NB_DFS_EN;
......@@ -162,10 +174,47 @@ static void __init armada37xx_cpufreq_enable_dvfs(struct regmap *base)
regmap_update_bits(base, reg, mask, mask);
}
static int armada37xx_cpufreq_suspend(struct cpufreq_policy *policy)
{
struct armada37xx_cpufreq_state *state = armada37xx_cpufreq_state;
regmap_read(state->regmap, ARMADA_37XX_NB_L0L1, &state->nb_l0l1);
regmap_read(state->regmap, ARMADA_37XX_NB_L2L3, &state->nb_l2l3);
regmap_read(state->regmap, ARMADA_37XX_NB_CPU_LOAD,
&state->nb_cpu_load);
regmap_read(state->regmap, ARMADA_37XX_NB_DYN_MOD, &state->nb_dyn_mod);
return 0;
}
static int armada37xx_cpufreq_resume(struct cpufreq_policy *policy)
{
struct armada37xx_cpufreq_state *state = armada37xx_cpufreq_state;
/* Ensure DVFS is disabled, otherwise the following registers are RO */
armada37xx_cpufreq_disable_dvfs(state->regmap);
regmap_write(state->regmap, ARMADA_37XX_NB_L0L1, state->nb_l0l1);
regmap_write(state->regmap, ARMADA_37XX_NB_L2L3, state->nb_l2l3);
regmap_write(state->regmap, ARMADA_37XX_NB_CPU_LOAD,
state->nb_cpu_load);
/*
* The NB_DYN_MOD register is the one that actually re-enables DVFS if it
* was enabled before the suspend operation. This must be done last,
* otherwise the other registers are not writable.
*/
regmap_write(state->regmap, ARMADA_37XX_NB_DYN_MOD, state->nb_dyn_mod);
return 0;
}
static int __init armada37xx_cpufreq_driver_init(void)
{
struct cpufreq_dt_platform_data pdata;
struct armada_37xx_dvfs *dvfs;
struct platform_device *pdev;
unsigned long freq;
unsigned int cur_frequency;
struct regmap *nb_pm_base;
struct device *cpu_dev;
......@@ -207,33 +256,58 @@ static int __init armada37xx_cpufreq_driver_init(void)
}
dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
if (!dvfs)
if (!dvfs) {
clk_put(clk);
return -EINVAL;
}
armada37xx_cpufreq_state = kmalloc(sizeof(*armada37xx_cpufreq_state),
GFP_KERNEL);
if (!armada37xx_cpufreq_state) {
clk_put(clk);
return -ENOMEM;
}
armada37xx_cpufreq_state->regmap = nb_pm_base;
armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
clk_put(clk);
for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
load_lvl++) {
unsigned long freq = cur_frequency / dvfs->divider[load_lvl];
freq = cur_frequency / dvfs->divider[load_lvl];
ret = dev_pm_opp_add(cpu_dev, freq, 0);
if (ret) {
/* clean-up the already added opp before leaving */
while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
freq = cur_frequency / dvfs->divider[load_lvl];
dev_pm_opp_remove(cpu_dev, freq);
}
return ret;
}
if (ret)
goto remove_opp;
}
/* Now that everything is setup, enable the DVFS at hardware level */
armada37xx_cpufreq_enable_dvfs(nb_pm_base);
pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
pdata.suspend = armada37xx_cpufreq_suspend;
pdata.resume = armada37xx_cpufreq_resume;
pdev = platform_device_register_data(NULL, "cpufreq-dt", -1, &pdata,
sizeof(pdata));
ret = PTR_ERR_OR_ZERO(pdev);
if (ret)
goto disable_dvfs;
return 0;
disable_dvfs:
armada37xx_cpufreq_disable_dvfs(nb_pm_base);
remove_opp:
/* clean-up the already added opp before leaving */
while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
freq = cur_frequency / dvfs->divider[load_lvl];
dev_pm_opp_remove(cpu_dev, freq);
}
kfree(armada37xx_cpufreq_state);
return PTR_ERR_OR_ZERO(pdev);
return ret;
}
/* late_initcall, to guarantee the driver is loaded after the A37xx clock driver */
late_initcall(armada37xx_cpufreq_driver_init);
......
......@@ -66,8 +66,6 @@ static const struct of_device_id whitelist[] __initconst = {
{ .compatible = "renesas,r8a7792", },
{ .compatible = "renesas,r8a7793", },
{ .compatible = "renesas,r8a7794", },
{ .compatible = "renesas,r8a7795", },
{ .compatible = "renesas,r8a7796", },
{ .compatible = "renesas,sh73a0", },
{ .compatible = "rockchip,rk2928", },
......
......@@ -346,8 +346,14 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
if (ret)
return ret;
if (data && data->have_governor_per_policy)
dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
if (data) {
if (data->have_governor_per_policy)
dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;
dt_cpufreq_driver.resume = data->resume;
if (data->suspend)
dt_cpufreq_driver.suspend = data->suspend;
}
ret = cpufreq_register_driver(&dt_cpufreq_driver);
if (ret)
......
......@@ -12,8 +12,13 @@
#include <linux/types.h>
struct cpufreq_policy;
struct cpufreq_dt_platform_data {
bool have_governor_per_policy;
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
};
#endif /* __CPUFREQ_DT_H__ */
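
The two new callbacks let a platform driver hand its own suspend/resume hooks to cpufreq-dt through platform data instead of patching the generic driver, which is how the Armada 37xx change above uses them. A minimal sketch of the pattern, with hypothetical foo_* names standing in for a real platform driver:

#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include "cpufreq-dt.h"

static int foo_cpufreq_suspend(struct cpufreq_policy *policy)
{
	/* save platform-specific frequency registers here */
	return 0;
}

static int foo_cpufreq_resume(struct cpufreq_policy *policy)
{
	/* restore platform-specific frequency registers here */
	return 0;
}

static int __init foo_cpufreq_init(void)
{
	struct cpufreq_dt_platform_data pdata = {
		.suspend = foo_cpufreq_suspend,
		.resume  = foo_cpufreq_resume,
	};
	struct platform_device *pdev;

	/* platform_device_register_data() copies pdata, so a stack variable is fine */
	pdev = platform_device_register_data(NULL, "cpufreq-dt", -1,
					     &pdata, sizeof(pdata));
	return PTR_ERR_OR_ZERO(pdev);
}
late_initcall(foo_cpufreq_init);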
......@@ -300,8 +300,19 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
#endif
}
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state)
/**
* cpufreq_notify_transition - Notify frequency transition and adjust_jiffies.
* @policy: cpufreq policy whose CPUs are affected by the transition.
* @freqs: contains details of the frequency update.
* @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
*
* This function calls the transition notifiers and the "adjust_jiffies"
* function. It is called twice on all CPU frequency changes that have
* external effects.
*/
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs,
unsigned int state)
{
BUG_ON(irqs_disabled());
......@@ -313,54 +324,44 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
state, freqs->new);
switch (state) {
case CPUFREQ_PRECHANGE:
/* detect if the driver reported a value as "old frequency"
/*
* Detect if the driver reported a value as "old frequency"
* which is not equal to what the cpufreq core thinks is
* "old frequency".
*/
if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
if ((policy) && (policy->cpu == freqs->cpu) &&
(policy->cur) && (policy->cur != freqs->old)) {
if (policy->cur && (policy->cur != freqs->old)) {
pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
freqs->old, policy->cur);
freqs->old = policy->cur;
}
}
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_PRECHANGE, freqs);
for_each_cpu(freqs->cpu, policy->cpus) {
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_PRECHANGE, freqs);
}
adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
break;
case CPUFREQ_POSTCHANGE:
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
pr_debug("FREQ: %lu - CPU: %lu\n",
(unsigned long)freqs->new, (unsigned long)freqs->cpu);
trace_cpu_frequency(freqs->new, freqs->cpu);
pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
cpumask_pr_args(policy->cpus));
for_each_cpu(freqs->cpu, policy->cpus) {
trace_cpu_frequency(freqs->new, freqs->cpu);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
}
cpufreq_stats_record_transition(policy, freqs->new);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
if (likely(policy) && likely(policy->cpu == freqs->cpu))
policy->cur = freqs->new;
break;
policy->cur = freqs->new;
}
}
/**
* cpufreq_notify_transition - call notifier chain and adjust_jiffies
* on frequency transition.
*
* This function calls the transition notifiers and the "adjust_jiffies"
* function. It is called twice on all CPU frequency changes that have
* external effects.
*/
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state)
{
for_each_cpu(freqs->cpu, policy->cpus)
__cpufreq_notify_transition(policy, freqs, state);
}
/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, int transition_failed)
......
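
The kerneldoc above notes that these notifiers fire twice per externally visible frequency change, once with CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE, and with this change once per CPU in the policy. For reference, a consumer registers on the transition chain through the public API; a minimal sketch of such a notifier (the foo_* names are hypothetical):

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int foo_transition_notifier(struct notifier_block *nb,
				   unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("CPU%u switched from %u kHz to %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block foo_transition_nb = {
	.notifier_call = foo_transition_notifier,
};

static int __init foo_notifier_init(void)
{
	return cpufreq_register_notifier(&foo_transition_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
late_initcall(foo_notifier_init);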
......@@ -1939,13 +1939,51 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
return 0;
}
/* Use of trace in passive mode:
*
* In passive mode the trace core_busy field (also known as the
* performance field, and labelled as such on the graphs; also known as
* core_avg_perf) is not needed and so is re-assigned to indicate if the
* driver call was via the normal or fast switch path. Various graphs
* output from the intel_pstate_tracer.py utility that include core_busy
* (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
* so we use 10 to indicate the normal path through the driver, and
* 90 to indicate the fast switch path through the driver.
* The scaled_busy field is not used, and is set to 0.
*/
#define INTEL_PSTATE_TRACE_TARGET 10
#define INTEL_PSTATE_TRACE_FAST_SWITCH 90
static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
{
struct sample *sample;
if (!trace_pstate_sample_enabled())
return;
if (!intel_pstate_sample(cpu, ktime_get()))
return;
sample = &cpu->sample;
trace_pstate_sample(trace_type,
0,
old_pstate,
cpu->pstate.current_pstate,
sample->mperf,
sample->aperf,
sample->tsc,
get_avg_frequency(cpu),
fp_toint(cpu->iowait_boost * 100));
}
static int intel_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
struct cpufreq_freqs freqs;
int target_pstate;
int target_pstate, old_pstate;
update_turbo_state();
......@@ -1965,12 +2003,14 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
break;
}
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
old_pstate = cpu->pstate.current_pstate;
if (target_pstate != cpu->pstate.current_pstate) {
cpu->pstate.current_pstate = target_pstate;
wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
}
freqs.new = target_pstate * cpu->pstate.scaling;
intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_TARGET, old_pstate);
cpufreq_freq_transition_end(policy, &freqs, false);
return 0;
......@@ -1980,13 +2020,15 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
int target_pstate;
int target_pstate, old_pstate;
update_turbo_state();
target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
old_pstate = cpu->pstate.current_pstate;
intel_pstate_update_pstate(cpu, target_pstate);
intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
return target_pstate * cpu->pstate.scaling;
}
......
......@@ -143,7 +143,7 @@ static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
{
unsigned long clkdiv, camdiv;
s3c_freq_dbg("%s: divsiors: h=%d, p=%d\n", __func__,
s3c_freq_dbg("%s: divisors: h=%d, p=%d\n", __func__,
cfg->divs.h_divisor, cfg->divs.p_divisor);
clkdiv = __raw_readl(S3C2410_CLKDIVN);
......
......@@ -252,7 +252,7 @@ EXPORT_SYMBOL_GPL(speedstep_get_frequency);
*********************************************************************/
/* Keep in sync with the x86_cpu_id tables in the different modules */
unsigned int speedstep_detect_processor(void)
enum speedstep_processor speedstep_detect_processor(void)
{
struct cpuinfo_x86 *c = &cpu_data(0);
u32 ebx, msr_lo, msr_hi;
......