Commit 4ffe18c2 authored by Rafael J. Wysocki

Merge branch 'pm-cpufreq'

* pm-cpufreq: (53 commits)
  cpufreq: speedstep-lib: Use monotonic clock
  cpufreq: powernv: Increase the verbosity of OCC console messages
  cpufreq: sfi: use kmemdup rather than duplicating its implementation
  cpufreq: drop !cpufreq_driver check from cpufreq_parse_governor()
  cpufreq: rename cpufreq_real_policy as cpufreq_user_policy
  cpufreq: remove redundant 'policy' field from user_policy
  cpufreq: remove redundant 'governor' field from user_policy
  cpufreq: update user_policy.* on success
  cpufreq: use memcpy() to copy policy
  cpufreq: remove redundant CPUFREQ_INCOMPATIBLE notifier event
  cpufreq: mediatek: Add MT8173 cpufreq driver
  dt-bindings: mediatek: Add MT8173 CPU DVFS clock bindings
  intel_pstate: append more Oracle OEM table id to vendor bypass list
  intel_pstate: Add SKY-S support
  intel_pstate: Fix possible overflow complained by Coverity
  cpufreq: Correct a freq check in cpufreq_set_policy()
  cpufreq: Lock CPU online/offline in cpufreq_register_driver()
  cpufreq: Replace recover_policy with new_policy in cpufreq_online()
  cpufreq: Separate CPU device registration from CPU online
  cpufreq: powernv: Restore cpu frequency to policy->cur on unthrottling
  ...
parents 49801251 72e624de
@@ -55,16 +55,13 @@ transition notifiers.
 ----------------------------
 These are notified when a new policy is intended to be set. Each
-CPUFreq policy notifier is called three times for a policy transition:
+CPUFreq policy notifier is called twice for a policy transition:
 1.) During CPUFREQ_ADJUST all CPUFreq notifiers may change the limit if
     they see a need for this - may it be thermal considerations or
     hardware limitations.
-2.) During CPUFREQ_INCOMPATIBLE only changes may be done in order to avoid
-    hardware failure.
-3.) And during CPUFREQ_NOTIFY all notifiers are informed of the new policy
+2.) And during CPUFREQ_NOTIFY all notifiers are informed of the new policy
     - if two hardware drivers failed to agree on a new policy before this
     stage, the incompatible hardware shall be shut down, and the user
     informed of this.
......
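For orientation, here is a minimal sketch of a policy notifier written against the two-stage scheme described in the doc hunk above. This is hypothetical driver code, not part of this merge; thermal_max_khz is a made-up limit used only for illustration:

	static unsigned int thermal_max_khz = 1200000;	/* made-up cap */

	static int thermal_cpufreq_notifier(struct notifier_block *nb,
					    unsigned long event, void *data)
	{
		struct cpufreq_policy *policy = data;

		/* All limit changes now happen in CPUFREQ_ADJUST;
		 * the CPUFREQ_INCOMPATIBLE stage no longer exists. */
		if (event != CPUFREQ_ADJUST)
			return 0;

		/* Clamp policy->max; the core re-validates afterwards. */
		cpufreq_verify_within_limits(policy, 0, thermal_max_khz);
		return 0;
	}

	static struct notifier_block thermal_nb = {
		.notifier_call = thermal_cpufreq_notifier,
	};

	/* Registered with:
	 *	cpufreq_register_notifier(&thermal_nb, CPUFREQ_POLICY_NOTIFIER);
	 * The acpi-cpufreq, pxafb and sa1100fb notifiers below were
	 * converted in exactly this way.
	 */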
Device Tree Clock bindings for CPU DVFS of Mediatek MT8173 SoC
Required properties:
- clocks: A list of phandle + clock-specifier pairs for the clocks listed in clock-names.
- clock-names: Should contain the following:
"cpu" - The multiplexer for clock input of CPU cluster.
"intermediate" - A parent of "cpu" clock which is used as "intermediate" clock
source (usually MAINPLL) when the original CPU PLL is under
transition and not stable yet.
Please refer to Documentation/devicetree/bindings/clk/clock-bindings.txt for
generic clock consumer properties.
- proc-supply: Regulator for Vproc of CPU cluster.
Optional properties:
- sram-supply: Regulator for Vsram of CPU cluster. When present, the cpufreq driver
needs to do "voltage tracking" to step by step scale up/down Vproc and
Vsram to fit SoC specific needs. When absent, the voltage scaling
flow is handled by hardware, hence no software "voltage tracking" is
needed.
Example:
--------
	cpu0: cpu@0 {
		device_type = "cpu";
		compatible = "arm,cortex-a53";
		reg = <0x000>;
		enable-method = "psci";
		cpu-idle-states = <&CPU_SLEEP_0>;
		clocks = <&infracfg CLK_INFRA_CA53SEL>,
			 <&apmixedsys CLK_APMIXED_MAINPLL>;
		clock-names = "cpu", "intermediate";
	};

	cpu1: cpu@1 {
		device_type = "cpu";
		compatible = "arm,cortex-a53";
		reg = <0x001>;
		enable-method = "psci";
		cpu-idle-states = <&CPU_SLEEP_0>;
		clocks = <&infracfg CLK_INFRA_CA53SEL>,
			 <&apmixedsys CLK_APMIXED_MAINPLL>;
		clock-names = "cpu", "intermediate";
	};

	cpu2: cpu@100 {
		device_type = "cpu";
		compatible = "arm,cortex-a57";
		reg = <0x100>;
		enable-method = "psci";
		cpu-idle-states = <&CPU_SLEEP_0>;
		clocks = <&infracfg CLK_INFRA_CA57SEL>,
			 <&apmixedsys CLK_APMIXED_MAINPLL>;
		clock-names = "cpu", "intermediate";
	};

	cpu3: cpu@101 {
		device_type = "cpu";
		compatible = "arm,cortex-a57";
		reg = <0x101>;
		enable-method = "psci";
		cpu-idle-states = <&CPU_SLEEP_0>;
		clocks = <&infracfg CLK_INFRA_CA57SEL>,
			 <&apmixedsys CLK_APMIXED_MAINPLL>;
		clock-names = "cpu", "intermediate";
	};

	&cpu0 {
		proc-supply = <&mt6397_vpca15_reg>;
	};

	&cpu1 {
		proc-supply = <&mt6397_vpca15_reg>;
	};

	&cpu2 {
		proc-supply = <&da9211_vcpu_reg>;
		sram-supply = <&mt6397_vsramca7_reg>;
	};

	&cpu3 {
		proc-supply = <&da9211_vcpu_reg>;
		sram-supply = <&mt6397_vsramca7_reg>;
	};
@@ -361,6 +361,7 @@ enum opal_msg_type {
 	OPAL_MSG_HMI_EVT,
 	OPAL_MSG_DPO,
 	OPAL_MSG_PRD,
+	OPAL_MSG_OCC,
 	OPAL_MSG_TYPE_MAX,
 };
@@ -700,6 +701,17 @@ struct opal_prd_msg_header {
 struct opal_prd_msg;

+#define OCC_RESET                       0
+#define OCC_LOAD                        1
+#define OCC_THROTTLE                    2
+#define OCC_MAX_THROTTLE_STATUS         5
+
+struct opal_occ_msg {
+	__be64 type;
+	__be64 chip;
+	__be64 throttle_status;
+};
+
 /*
  * SG entries
  *
......
@@ -83,7 +83,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
 	if (ignore_ppc)
 		return 0;

-	if (event != CPUFREQ_INCOMPATIBLE)
+	if (event != CPUFREQ_ADJUST)
 		return 0;

 	mutex_lock(&performance_mutex);
@@ -780,9 +780,7 @@ acpi_processor_register_performance(struct acpi_processor_performance
 EXPORT_SYMBOL(acpi_processor_register_performance);

-void
-acpi_processor_unregister_performance(struct acpi_processor_performance
-				      *performance, unsigned int cpu)
+void acpi_processor_unregister_performance(unsigned int cpu)
 {
 	struct acpi_processor *pr;
......
@@ -130,6 +130,13 @@ config ARM_KIRKWOOD_CPUFREQ
 	  This adds the CPUFreq driver for Marvell Kirkwood
 	  SoCs.

+config ARM_MT8173_CPUFREQ
+	bool "Mediatek MT8173 CPUFreq support"
+	depends on ARCH_MEDIATEK && REGULATOR
+	select PM_OPP
+	help
+	  This adds the CPUFreq driver support for Mediatek MT8173 SoC.
+
 config ARM_OMAP2PLUS_CPUFREQ
 	bool "TI OMAP2+"
 	depends on ARCH_OMAP2PLUS
......
@@ -62,6 +62,7 @@ obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ)	+= hisi-acpu-cpufreq.o
 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)		+= imx6q-cpufreq.o
 obj-$(CONFIG_ARM_INTEGRATOR)		+= integrator-cpufreq.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)	+= kirkwood-cpufreq.o
+obj-$(CONFIG_ARM_MT8173_CPUFREQ)	+= mt8173-cpufreq.o
 obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)	+= omap-cpufreq.o
 obj-$(CONFIG_ARM_PXA2xx_CPUFREQ)	+= pxa2xx-cpufreq.o
 obj-$(CONFIG_PXA3xx)			+= pxa3xx-cpufreq.o
......
@@ -65,18 +65,21 @@ enum {
 #define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

 struct acpi_cpufreq_data {
-	struct acpi_processor_performance *acpi_data;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int resume;
 	unsigned int cpu_feature;
+	unsigned int acpi_perf_cpu;
 	cpumask_var_t freqdomain_cpus;
 };

-static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
-
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance __percpu *acpi_perf_data;

+static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
+{
+	return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
+}
+
 static struct cpufreq_driver acpi_cpufreq_driver;

 static unsigned int acpi_pstate_strict;
@@ -144,7 +147,7 @@ static int _store_boost(int val)

 static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
 {
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;

 	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
 }
@@ -202,7 +205,7 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
 	struct acpi_processor_performance *perf;
 	int i;

-	perf = data->acpi_data;
+	perf = to_perf_data(data);
 	for (i = 0; i < perf->state_count; i++) {
 		if (value == perf->states[i].status)
@@ -221,7 +224,7 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
 	else
 		msr &= INTEL_MSR_RANGE;

-	perf = data->acpi_data;
+	perf = to_perf_data(data);

 	cpufreq_for_each_entry(pos, data->freq_table)
 		if (msr == perf->states[pos->driver_data].status)
@@ -327,7 +330,8 @@ static void drv_write(struct drv_cmd *cmd)
 	put_cpu();
 }

-static u32 get_cur_val(const struct cpumask *mask)
+static u32
+get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
 {
 	struct acpi_processor_performance *perf;
 	struct drv_cmd cmd;
@@ -335,7 +339,7 @@ static u32 get_cur_val(const struct cpumask *mask)
 	if (unlikely(cpumask_empty(mask)))
 		return 0;

-	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
+	switch (data->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
@@ -346,7 +350,7 @@ static u32 get_cur_val(const struct cpumask *mask)
 		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
-		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
+		perf = to_perf_data(data);
 		cmd.addr.io.port = perf->control_register.address;
 		cmd.addr.io.bit_width = perf->control_register.bit_width;
 		break;
@@ -364,19 +368,24 @@ static u32 get_cur_val(const struct cpumask *mask)
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
+	struct acpi_cpufreq_data *data;
+	struct cpufreq_policy *policy;
 	unsigned int freq;
 	unsigned int cached_freq;

 	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

-	if (unlikely(data == NULL ||
-		     data->acpi_data == NULL || data->freq_table == NULL)) {
+	policy = cpufreq_cpu_get(cpu);
+	if (unlikely(!policy))
 		return 0;
-	}

-	cached_freq = data->freq_table[data->acpi_data->state].frequency;
-	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
+	data = policy->driver_data;
+	cpufreq_cpu_put(policy);
+	if (unlikely(!data || !data->freq_table))
+		return 0;
+
+	cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
+	freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
 	if (freq != cached_freq) {
 		/*
 		 * The dreaded BIOS frequency change behind our back.
@@ -397,7 +406,7 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
 	unsigned int i;

 	for (i = 0; i < 100; i++) {
-		cur_freq = extract_freq(get_cur_val(mask), data);
+		cur_freq = extract_freq(get_cur_val(mask, data), data);
 		if (cur_freq == freq)
 			return 1;
 		udelay(10);
@@ -408,18 +417,17 @@ static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
 static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 			       unsigned int index)
 {
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;
 	struct acpi_processor_performance *perf;
 	struct drv_cmd cmd;
 	unsigned int next_perf_state = 0; /* Index into perf table */
 	int result = 0;

-	if (unlikely(data == NULL ||
-	     data->acpi_data == NULL || data->freq_table == NULL)) {
+	if (unlikely(data == NULL || data->freq_table == NULL)) {
 		return -ENODEV;
 	}

-	perf = data->acpi_data;
+	perf = to_perf_data(data);
 	next_perf_state = data->freq_table[index].driver_data;
 	if (perf->state == next_perf_state) {
 		if (unlikely(data->resume)) {
@@ -482,8 +490,9 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 static unsigned long
 acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 {
-	struct acpi_processor_performance *perf = data->acpi_data;
+	struct acpi_processor_performance *perf;

+	perf = to_perf_data(data);
 	if (cpu_khz) {
 		/* search the closest match to cpu_khz */
 		unsigned int i;
@@ -672,17 +681,17 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		goto err_free;
 	}

-	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
-	per_cpu(acfreq_data, cpu) = data;
+	perf = per_cpu_ptr(acpi_perf_data, cpu);
+	data->acpi_perf_cpu = cpu;
+	policy->driver_data = data;

 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
 		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

-	result = acpi_processor_register_performance(data->acpi_data, cpu);
+	result = acpi_processor_register_performance(perf, cpu);
 	if (result)
 		goto err_free_mask;

-	perf = data->acpi_data;
 	policy->shared_type = perf->shared_type;

 	/*
@@ -838,26 +847,25 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 err_freqfree:
 	kfree(data->freq_table);
 err_unreg:
-	acpi_processor_unregister_performance(perf, cpu);
+	acpi_processor_unregister_performance(cpu);
 err_free_mask:
 	free_cpumask_var(data->freqdomain_cpus);
 err_free:
 	kfree(data);
-	per_cpu(acfreq_data, cpu) = NULL;
+	policy->driver_data = NULL;

 	return result;
 }

 static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;

 	pr_debug("acpi_cpufreq_cpu_exit\n");

 	if (data) {
-		per_cpu(acfreq_data, policy->cpu) = NULL;
-		acpi_processor_unregister_performance(data->acpi_data,
-						      policy->cpu);
+		policy->driver_data = NULL;
+		acpi_processor_unregister_performance(data->acpi_perf_cpu);
 		free_cpumask_var(data->freqdomain_cpus);
 		kfree(data->freq_table);
 		kfree(data);
@@ -868,7 +876,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 {
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;

 	pr_debug("acpi_cpufreq_resume\n");
@@ -880,7 +888,9 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 static struct freq_attr *acpi_cpufreq_attr[] = {
 	&cpufreq_freq_attr_scaling_available_freqs,
 	&freqdomain_cpus,
-	NULL,	/* this is a placeholder for cpb, do not remove */
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+	&cpb,
+#endif
 	NULL,
 };
@@ -953,17 +963,16 @@ static int __init acpi_cpufreq_init(void)
 	 * only if configured. This is considered legacy code, which
 	 * will probably be removed at some point in the future.
 	 */
-	if (check_amd_hwpstate_cpu(0)) {
-		struct freq_attr **iter;
+	if (!check_amd_hwpstate_cpu(0)) {
+		struct freq_attr **attr;

-		pr_debug("adding sysfs entry for cpb\n");
+		pr_debug("CPB unsupported, do not expose it\n");

-		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
-			;
-
-		/* make sure there is a terminator behind it */
-		if (iter[1] == NULL)
-			*iter = &cpb;
+		for (attr = acpi_cpufreq_attr; *attr; attr++)
+			if (*attr == &cpb) {
+				*attr = NULL;
+				break;
+			}
 	}
 #endif
 	acpi_cpufreq_boost_init();
......
This diff is collapsed.
@@ -47,7 +47,7 @@ static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
 static void cs_check_cpu(int cpu, unsigned int load)
 {
 	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
-	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+	struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
 	struct dbs_data *dbs_data = policy->governor_data;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
@@ -102,26 +102,15 @@ static void cs_check_cpu(int cpu, unsigned int load)
 	}
 }

-static void cs_dbs_timer(struct work_struct *work)
+static unsigned int cs_dbs_timer(struct cpu_dbs_info *cdbs,
+				 struct dbs_data *dbs_data, bool modify_all)
 {
-	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
-			struct cs_cpu_dbs_info_s, cdbs.work.work);
-	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
-			cpu);
-	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-	int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
-	bool modify_all = true;

-	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-	if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
-		modify_all = false;
-	else
-		dbs_check_cpu(dbs_data, cpu);
+	if (modify_all)
+		dbs_check_cpu(dbs_data, cdbs->shared->policy->cpu);

-	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
-	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
+	return delay_for_sampling_rate(cs_tuners->sampling_rate);
 }

 static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -135,7 +124,7 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	if (!dbs_info->enable)
 		return 0;

-	policy = dbs_info->cdbs.cur_policy;
+	policy = dbs_info->cdbs.shared->policy;

 	/*
 	 * we only care if our internally tracked freq moves outside the 'valid'
......
This diff is collapsed.
@@ -109,7 +109,7 @@ store_one(_gov, file_name)

 /* create helper routines */
 #define define_get_cpu_dbs_routines(_dbs_info)				\
-static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu)		\
+static struct cpu_dbs_info *get_cpu_cdbs(int cpu)			\
 {									\
 	return &per_cpu(_dbs_info, cpu).cdbs;				\
 }									\
@@ -128,9 +128,20 @@ static void *get_cpu_dbs_info_s(int cpu)				\
  * cs_*: Conservative governor
  */

+/* Common to all CPUs of a policy */
+struct cpu_common_dbs_info {
+	struct cpufreq_policy *policy;
+	/*
+	 * percpu mutex that serializes governor limit change with dbs_timer
+	 * invocation. We do not want dbs_timer to run when user is changing
+	 * the governor or limits.
+	 */
+	struct mutex timer_mutex;
+	ktime_t time_stamp;
+};
+
 /* Per cpu structures */
-struct cpu_dbs_common_info {
-	int cpu;
+struct cpu_dbs_info {
 	u64 prev_cpu_idle;
 	u64 prev_cpu_wall;
 	u64 prev_cpu_nice;
@@ -141,19 +152,12 @@ struct cpu_dbs_common_info {
 	 * wake-up from idle.
 	 */
 	unsigned int prev_load;
-	struct cpufreq_policy *cur_policy;
-	struct delayed_work work;
-	/*
-	 * percpu mutex that serializes governor limit change with gov_dbs_timer
-	 * invocation. We do not want gov_dbs_timer to run when user is changing
-	 * the governor or limits.
-	 */
-	struct mutex timer_mutex;
-	ktime_t time_stamp;
+	struct delayed_work dwork;
+	struct cpu_common_dbs_info *shared;
 };

 struct od_cpu_dbs_info_s {
-	struct cpu_dbs_common_info cdbs;
+	struct cpu_dbs_info cdbs;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int freq_lo;
 	unsigned int freq_lo_jiffies;
@@ -163,7 +167,7 @@ struct od_cpu_dbs_info_s {
 };

 struct cs_cpu_dbs_info_s {
-	struct cpu_dbs_common_info cdbs;
+	struct cpu_dbs_info cdbs;
 	unsigned int down_skip;
 	unsigned int requested_freq;
 	unsigned int enable:1;
@@ -204,9 +208,11 @@ struct common_dbs_data {
 	 */
 	struct dbs_data *gdbs_data;

-	struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
+	struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu);
 	void *(*get_cpu_dbs_info_s)(int cpu);
-	void (*gov_dbs_timer)(struct work_struct *work);
+	unsigned int (*gov_dbs_timer)(struct cpu_dbs_info *cdbs,
+				      struct dbs_data *dbs_data,
+				      bool modify_all);
 	void (*gov_check_cpu)(int cpu, unsigned int load);
 	int (*init)(struct dbs_data *dbs_data, bool notify);
 	void (*exit)(struct dbs_data *dbs_data, bool notify);
@@ -265,8 +271,6 @@ static ssize_t show_sampling_rate_min_gov_pol		\
 extern struct mutex cpufreq_governor_lock;

 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
-bool need_load_eval(struct cpu_dbs_common_info *cdbs,
-		    unsigned int sampling_rate);
 int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		struct common_dbs_data *cdata, unsigned int event);
 void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
......
@@ -155,7 +155,7 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
 static void od_check_cpu(int cpu, unsigned int load)
 {
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
-	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+	struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
 	struct dbs_data *dbs_data = policy->governor_data;
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
@@ -191,46 +191,40 @@ static void od_check_cpu(int cpu, unsigned int load)
 	}
 }

-static void od_dbs_timer(struct work_struct *work)
+static unsigned int od_dbs_timer(struct cpu_dbs_info *cdbs,
+				 struct dbs_data *dbs_data, bool modify_all)
 {
-	struct od_cpu_dbs_info_s *dbs_info =
-		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
-	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
+	struct cpufreq_policy *policy = cdbs->shared->policy;
+	unsigned int cpu = policy->cpu;
+	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
 			cpu);
-	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
-	int delay = 0, sample_type = core_dbs_info->sample_type;
-	bool modify_all = true;
+	int delay = 0, sample_type = dbs_info->sample_type;

-	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-	if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
-		modify_all = false;
+	if (!modify_all)
 		goto max_delay;
-	}

 	/* Common NORMAL_SAMPLE setup */
-	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+	dbs_info->sample_type = OD_NORMAL_SAMPLE;
 	if (sample_type == OD_SUB_SAMPLE) {
-		delay = core_dbs_info->freq_lo_jiffies;
-		__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
-				core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
+		delay = dbs_info->freq_lo_jiffies;
+		__cpufreq_driver_target(policy, dbs_info->freq_lo,
+					CPUFREQ_RELATION_H);
 	} else {
 		dbs_check_cpu(dbs_data, cpu);
-		if (core_dbs_info->freq_lo) {
+		if (dbs_info->freq_lo) {
 			/* Setup timer for SUB_SAMPLE */
-			core_dbs_info->sample_type = OD_SUB_SAMPLE;
-			delay = core_dbs_info->freq_hi_jiffies;
+			dbs_info->sample_type = OD_SUB_SAMPLE;
+			delay = dbs_info->freq_hi_jiffies;
 		}
 	}

 max_delay:
 	if (!delay)
 		delay = delay_for_sampling_rate(od_tuners->sampling_rate
-				* core_dbs_info->rate_mult);
+				* dbs_info->rate_mult);

-	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
-	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
+	return delay;
 }

 /************************** sysfs interface ************************/
@@ -273,27 +267,27 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
 		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 		cpufreq_cpu_put(policy);

-		mutex_lock(&dbs_info->cdbs.timer_mutex);
+		mutex_lock(&dbs_info->cdbs.shared->timer_mutex);

-		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
-			mutex_unlock(&dbs_info->cdbs.timer_mutex);
+		if (!delayed_work_pending(&dbs_info->cdbs.dwork)) {
+			mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
 			continue;
 		}

 		next_sampling = jiffies + usecs_to_jiffies(new_rate);
-		appointed_at = dbs_info->cdbs.work.timer.expires;
+		appointed_at = dbs_info->cdbs.dwork.timer.expires;

 		if (time_before(next_sampling, appointed_at)) {
-			mutex_unlock(&dbs_info->cdbs.timer_mutex);
-			cancel_delayed_work_sync(&dbs_info->cdbs.work);
-			mutex_lock(&dbs_info->cdbs.timer_mutex);
+			mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
+			cancel_delayed_work_sync(&dbs_info->cdbs.dwork);
+			mutex_lock(&dbs_info->cdbs.shared->timer_mutex);

-			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
+			gov_queue_work(dbs_data, policy,
 				       usecs_to_jiffies(new_rate), true);
 		}
-		mutex_unlock(&dbs_info->cdbs.timer_mutex);
+		mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
 	}
 }
@@ -556,13 +550,16 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
+		struct cpu_common_dbs_info *shared;
+
 		if (cpumask_test_cpu(cpu, &done))
 			continue;

-		policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
-		if (!policy)
+		shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared;
+		if (!shared)
 			continue;

+		policy = shared->policy;
 		cpumask_or(&done, &done, policy->cpus);
 		if (policy->governor != &cpufreq_gov_ondemand)
......
@@ -78,7 +78,7 @@ static int eps_acpi_init(void)
 static int eps_acpi_exit(struct cpufreq_policy *policy)
 {
 	if (eps_acpi_cpu_perf) {
-		acpi_processor_unregister_performance(eps_acpi_cpu_perf, 0);
+		acpi_processor_unregister_performance(0);
 		free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map);
 		kfree(eps_acpi_cpu_perf);
 		eps_acpi_cpu_perf = NULL;
......
@@ -29,7 +29,6 @@ MODULE_LICENSE("GPL");

 struct cpufreq_acpi_io {
 	struct acpi_processor_performance	acpi_data;
-	struct cpufreq_frequency_table		*freq_table;
 	unsigned int				resume;
 };
@@ -221,6 +220,7 @@ acpi_cpufreq_cpu_init (
 	unsigned int		cpu = policy->cpu;
 	struct cpufreq_acpi_io	*data;
 	unsigned int		result = 0;
+	struct cpufreq_frequency_table *freq_table;

 	pr_debug("acpi_cpufreq_cpu_init\n");
@@ -254,10 +254,10 @@ acpi_cpufreq_cpu_init (
 	}

 	/* alloc freq_table */
-	data->freq_table = kzalloc(sizeof(*data->freq_table) *
+	freq_table = kzalloc(sizeof(*freq_table) *
 	                           (data->acpi_data.state_count + 1),
 	                           GFP_KERNEL);
-	if (!data->freq_table) {
+	if (!freq_table) {
 		result = -ENOMEM;
 		goto err_unreg;
 	}
@@ -276,14 +276,14 @@ acpi_cpufreq_cpu_init (
 	for (i = 0; i <= data->acpi_data.state_count; i++)
 	{
 		if (i < data->acpi_data.state_count) {
-			data->freq_table[i].frequency =
+			freq_table[i].frequency =
 				data->acpi_data.states[i].core_frequency * 1000;
 		} else {
-			data->freq_table[i].frequency = CPUFREQ_TABLE_END;
+			freq_table[i].frequency = CPUFREQ_TABLE_END;
 		}
 	}

-	result = cpufreq_table_validate_and_show(policy, data->freq_table);
+	result = cpufreq_table_validate_and_show(policy, freq_table);
 	if (result) {
 		goto err_freqfree;
 	}
@@ -311,9 +311,9 @@ acpi_cpufreq_cpu_init (
 	return (result);

  err_freqfree:
-	kfree(data->freq_table);
+	kfree(freq_table);
  err_unreg:
-	acpi_processor_unregister_performance(&data->acpi_data, cpu);
+	acpi_processor_unregister_performance(cpu);
  err_free:
 	kfree(data);
 	acpi_io_data[cpu] = NULL;
@@ -332,8 +332,8 @@ acpi_cpufreq_cpu_exit (
 	if (data) {
 		acpi_io_data[policy->cpu] = NULL;
-		acpi_processor_unregister_performance(&data->acpi_data,
-						      policy->cpu);
+		acpi_processor_unregister_performance(policy->cpu);
+		kfree(policy->freq_table);
 		kfree(data);
 	}
......
@@ -98,11 +98,10 @@ static int integrator_set_target(struct cpufreq_policy *policy,
 	/* get current setting */
 	cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);

-	if (machine_is_integrator()) {
+	if (machine_is_integrator())
 		vco.s = (cm_osc >> 8) & 7;
-	} else if (machine_is_cintegrator()) {
+	else if (machine_is_cintegrator())
 		vco.s = 1;
-	}
 	vco.v = cm_osc & 255;
 	vco.r = 22;
 	freqs.old = icst_hz(&cclk_params, vco) / 1000;
@@ -163,11 +162,10 @@ static unsigned int integrator_get(unsigned int cpu)

 	/* detect memory etc. */
 	cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);

-	if (machine_is_integrator()) {
+	if (machine_is_integrator())
 		vco.s = (cm_osc >> 8) & 7;
-	} else {
+	else
 		vco.s = 1;
-	}
 	vco.v = cm_osc & 255;
 	vco.r = 22;
@@ -203,7 +201,7 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev)
 	struct resource *res;

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res)
 		return -ENODEV;

 	cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
@@ -234,6 +232,6 @@ static struct platform_driver integrator_cpufreq_driver = {
 module_platform_driver_probe(integrator_cpufreq_driver,
 			     integrator_cpufreq_probe);

-MODULE_AUTHOR ("Russell M. King");
-MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs");
-MODULE_LICENSE ("GPL");
+MODULE_AUTHOR("Russell M. King");
+MODULE_DESCRIPTION("cpufreq driver for ARM Integrator CPUs");
+MODULE_LICENSE("GPL");
@@ -484,12 +484,11 @@ static void __init intel_pstate_sysfs_expose_params(void)
 }
 /************************** sysfs end ************************/

-static void intel_pstate_hwp_enable(void)
+static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
-	hwp_active++;
 	pr_info("intel_pstate: HWP enabled\n");

-	wrmsrl( MSR_PM_ENABLE, 0x1);
+	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }

 static int byt_get_min_pstate(void)
@@ -522,7 +521,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
 	int32_t vid_fp;
 	u32 vid;

-	val = pstate << 8;
+	val = (u64)pstate << 8;
 	if (limits.no_turbo && !limits.turbo_disabled)
 		val |= (u64)1 << 32;
@@ -611,7 +610,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
 {
 	u64 val;

-	val = pstate << 8;
+	val = (u64)pstate << 8;
 	if (limits.no_turbo && !limits.turbo_disabled)
 		val |= (u64)1 << 32;
@@ -909,6 +908,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x4c, byt_params),
 	ICPU(0x4e, core_params),
 	ICPU(0x4f, core_params),
+	ICPU(0x5e, core_params),
 	ICPU(0x56, core_params),
 	ICPU(0x57, knl_params),
 	{}
@@ -933,6 +933,10 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 	cpu = all_cpu_data[cpunum];
 	cpu->cpu = cpunum;
+
+	if (hwp_active)
+		intel_pstate_hwp_enable(cpu);
+
 	intel_pstate_get_cpu_pstates(cpu);

 	init_timer_deferrable(&cpu->timer);
@@ -1170,6 +1174,10 @@ static struct hw_vendor_info vendor_info[] = {
 	{1, "ORACLE", "X4270M3 ", PPC},
 	{1, "ORACLE", "X4270M2 ", PPC},
 	{1, "ORACLE", "X4170M2 ", PPC},
+	{1, "ORACLE", "X4170 M3", PPC},
+	{1, "ORACLE", "X4275 M3", PPC},
+	{1, "ORACLE", "X6-2    ", PPC},
+	{1, "ORACLE", "Sudbury ", PPC},
 	{0, "", ""},
 };
@@ -1246,7 +1254,7 @@ static int __init intel_pstate_init(void)
 		return -ENOMEM;

 	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
-		intel_pstate_hwp_enable();
+		hwp_active++;

 	if (!hwp_active && hwp_only)
 		goto out;
......
This diff is collapsed.
@@ -421,7 +421,7 @@ static int powernow_acpi_init(void)
 	return 0;

 err2:
-	acpi_processor_unregister_performance(acpi_processor_perf, 0);
+	acpi_processor_unregister_performance(0);
 err1:
 	free_cpumask_var(acpi_processor_perf->shared_cpu_map);
 err05:
@@ -661,7 +661,7 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
 {
 #ifdef CONFIG_X86_POWERNOW_K7_ACPI
 	if (acpi_processor_perf) {
-		acpi_processor_unregister_performance(acpi_processor_perf, 0);
+		acpi_processor_unregister_performance(0);
 		free_cpumask_var(acpi_processor_perf->shared_cpu_map);
 		kfree(acpi_processor_perf);
 	}
......
@@ -795,7 +795,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	kfree(powernow_table);

 err_out:
-	acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+	acpi_processor_unregister_performance(data->cpu);

 	/* data->acpi_data.state_count informs us at ->exit()
 	 * whether ACPI was used */
@@ -863,8 +863,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 {
 	if (data->acpi_data.state_count)
-		acpi_processor_unregister_performance(&data->acpi_data,
-						      data->cpu);
+		acpi_processor_unregister_performance(data->cpu);
 	free_cpumask_var(data->acpi_data.shared_cpu_map);
 }
......
@@ -27,20 +27,31 @@
 #include <linux/smp.h>
 #include <linux/of.h>
 #include <linux/reboot.h>
+#include <linux/slab.h>

 #include <asm/cputhreads.h>
 #include <asm/firmware.h>
 #include <asm/reg.h>
 #include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
+#include <asm/opal.h>

 #define POWERNV_MAX_PSTATES	256
 #define PMSR_PSAFE_ENABLE	(1UL << 30)
 #define PMSR_SPR_EM_DISABLE	(1UL << 31)
 #define PMSR_MAX(x)		((x >> 32) & 0xFF)
-#define PMSR_LP(x)		((x >> 48) & 0xFF)

 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
-static bool rebooting, throttled;
+static bool rebooting, throttled, occ_reset;
+
+static struct chip {
+	unsigned int id;
+	bool throttled;
+	cpumask_t mask;
+	struct work_struct throttle;
+	bool restore;
+} *chips;
+
+static int nr_chips;

 /*
  * Note: The set of pstates consists of contiguous integers, the
@@ -298,28 +309,35 @@ static inline unsigned int get_nominal_index(void)
 	return powernv_pstate_info.max - powernv_pstate_info.nominal;
 }

-static void powernv_cpufreq_throttle_check(unsigned int cpu)
+static void powernv_cpufreq_throttle_check(void *data)
 {
+	unsigned int cpu = smp_processor_id();
 	unsigned long pmsr;
-	int pmsr_pmax, pmsr_lp;
+	int pmsr_pmax, i;

 	pmsr = get_pmspr(SPRN_PMSR);

+	for (i = 0; i < nr_chips; i++)
+		if (chips[i].id == cpu_to_chip_id(cpu))
+			break;
+
 	/* Check for Pmax Capping */
 	pmsr_pmax = (s8)PMSR_MAX(pmsr);
 	if (pmsr_pmax != powernv_pstate_info.max) {
-		throttled = true;
-		pr_info("CPU %d Pmax is reduced to %d\n", cpu, pmsr_pmax);
-		pr_info("Max allowed Pstate is capped\n");
+		if (chips[i].throttled)
+			goto next;
+		chips[i].throttled = true;
+		pr_info("CPU %d on Chip %u has Pmax reduced to %d\n", cpu,
+			chips[i].id, pmsr_pmax);
+	} else if (chips[i].throttled) {
+		chips[i].throttled = false;
+		pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
+			chips[i].id, pmsr_pmax);
 	}

-	/*
-	 * Check for Psafe by reading LocalPstate
-	 * or check if Psafe_mode_active is set in PMSR.
-	 */
-	pmsr_lp = (s8)PMSR_LP(pmsr);
-	if ((pmsr_lp < powernv_pstate_info.min) ||
-				(pmsr & PMSR_PSAFE_ENABLE)) {
+	/* Check if Psafe_mode_active is set in PMSR. */
+next:
+	if (pmsr & PMSR_PSAFE_ENABLE) {
 		throttled = true;
 		pr_info("Pstate set to safe frequency\n");
 	}
@@ -350,7 +368,7 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
 		return 0;

 	if (!throttled)
-		powernv_cpufreq_throttle_check(smp_processor_id());
+		powernv_cpufreq_throttle_check(NULL);

 	freq_data.pstate_id = powernv_freqs[new_index].driver_data;
@@ -395,6 +413,119 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
 	.notifier_call = powernv_cpufreq_reboot_notifier,
 };

+void powernv_cpufreq_work_fn(struct work_struct *work)
+{
+	struct chip *chip = container_of(work, struct chip, throttle);
+	unsigned int cpu;
+	cpumask_var_t mask;
+
+	smp_call_function_any(&chip->mask,
+			      powernv_cpufreq_throttle_check, NULL, 0);
+
+	if (!chip->restore)
+		return;
+
+	chip->restore = false;
+	cpumask_copy(mask, &chip->mask);
+	for_each_cpu_and(cpu, mask, cpu_online_mask) {
+		int index, tcpu;
+		struct cpufreq_policy policy;
+
+		cpufreq_get_policy(&policy, cpu);
+		cpufreq_frequency_table_target(&policy, policy.freq_table,
+					       policy.cur,
+					       CPUFREQ_RELATION_C, &index);
+		powernv_cpufreq_target_index(&policy, index);
+		for_each_cpu(tcpu, policy.cpus)
+			cpumask_clear_cpu(tcpu, mask);
+	}
+}
+
+static char throttle_reason[][30] = {
+	"No throttling",
+	"Power Cap",
+	"Processor Over Temperature",
+	"Power Supply Failure",
+	"Over Current",
+	"OCC Reset"
+};
+
+static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
+				   unsigned long msg_type, void *_msg)
+{
+	struct opal_msg *msg = _msg;
+	struct opal_occ_msg omsg;
+	int i;
+
+	if (msg_type != OPAL_MSG_OCC)
+		return 0;
+
+	omsg.type = be64_to_cpu(msg->params[0]);
+
+	switch (omsg.type) {
+	case OCC_RESET:
+		occ_reset = true;
+		pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n");
+		/*
+		 * powernv_cpufreq_throttle_check() is called in
+		 * target() callback which can detect the throttle state
+		 * for governors like ondemand.
+		 * But static governors will not call target() often thus
+		 * report throttling here.
+		 */
+		if (!throttled) {
+			throttled = true;
+			pr_crit("CPU frequency is throttled for duration\n");
+		}
+		break;
+	case OCC_LOAD:
+		pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n");
+		break;
+	case OCC_THROTTLE:
+		omsg.chip = be64_to_cpu(msg->params[1]);
+		omsg.throttle_status = be64_to_cpu(msg->params[2]);
+
+		if (occ_reset) {
+			occ_reset = false;
+			throttled = false;
+			pr_info("OCC Active, CPU frequency is no longer throttled\n");
+
+			for (i = 0; i < nr_chips; i++) {
+				chips[i].restore = true;
+				schedule_work(&chips[i].throttle);
+			}
+
+			return 0;
+		}
+
+		if (omsg.throttle_status &&
+		    omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS)
+			pr_info("OCC: Chip %u Pmax reduced due to %s\n",
+				(unsigned int)omsg.chip,
+				throttle_reason[omsg.throttle_status]);
+		else if (!omsg.throttle_status)
+			pr_info("OCC: Chip %u %s\n", (unsigned int)omsg.chip,
+				throttle_reason[omsg.throttle_status]);
+		else
+			return 0;
+
+		for (i = 0; i < nr_chips; i++)
+			if (chips[i].id == omsg.chip) {
+				if (!omsg.throttle_status)
+					chips[i].restore = true;
+				schedule_work(&chips[i].throttle);
+			}
+	}
+	return 0;
+}
+
+static struct notifier_block powernv_cpufreq_opal_nb = {
+	.notifier_call	= powernv_cpufreq_occ_msg,
+	.next		= NULL,
+	.priority	= 0,
+};
+
 static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
 {
 	struct powernv_smp_call_data freq_data;
@@ -414,6 +545,36 @@ static struct cpufreq_driver powernv_cpufreq_driver = {
 	.attr		= powernv_cpu_freq_attr,
 };

+static int init_chip_info(void)
+{
+	unsigned int chip[256];
+	unsigned int cpu, i;
+	unsigned int prev_chip_id = UINT_MAX;
+
+	for_each_possible_cpu(cpu) {
+		unsigned int id = cpu_to_chip_id(cpu);
+
+		if (prev_chip_id != id) {
+			prev_chip_id = id;
+			chip[nr_chips++] = id;
+		}
+	}
+
+	chips = kmalloc_array(nr_chips, sizeof(struct chip), GFP_KERNEL);
+	if (!chips)
+		return -ENOMEM;
+
+	for (i = 0; i < nr_chips; i++) {
+		chips[i].id = chip[i];
+		chips[i].throttled = false;
+		cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
+		INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
+		chips[i].restore = false;
+	}
+
+	return 0;
+}
+
 static int __init powernv_cpufreq_init(void)
 {
 	int rc = 0;
@@ -429,7 +590,13 @@ static int __init powernv_cpufreq_init(void)
 		return rc;
 	}

+	/* Populate chip info */
+	rc = init_chip_info();
+	if (rc)
+		return rc;
+
 	register_reboot_notifier(&powernv_cpufreq_reboot_nb);
+	opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
+
 	return cpufreq_register_driver(&powernv_cpufreq_driver);
 }
 module_init(powernv_cpufreq_init);
@@ -437,6 +604,8 @@ module_init(powernv_cpufreq_init);
 static void __exit powernv_cpufreq_exit(void)
 {
 	unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
+	opal_message_notifier_unregister(OPAL_MSG_OCC,
+					 &powernv_cpufreq_opal_nb);
 	cpufreq_unregister_driver(&powernv_cpufreq_driver);
 }
 module_exit(powernv_cpufreq_exit);
......
@@ -97,8 +97,8 @@ static int pmi_notifier(struct notifier_block *nb,
 	struct cpufreq_frequency_table *cbe_freqs;
 	u8 node;

-	/* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
-	 * and CPUFREQ_NOTIFY policy events?)
+	/* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
+	 * policy events?)
 	 */
 	if (event == CPUFREQ_START)
 		return 0;
......
@@ -45,12 +45,10 @@ static int sfi_parse_freq(struct sfi_table_header *table)
 	pentry = (struct sfi_freq_table_entry *)sb->pentry;
 	totallen = num_freq_table_entries * sizeof(*pentry);

-	sfi_cpufreq_array = kzalloc(totallen, GFP_KERNEL);
+	sfi_cpufreq_array = kmemdup(pentry, totallen, GFP_KERNEL);
 	if (!sfi_cpufreq_array)
 		return -ENOMEM;

-	memcpy(sfi_cpufreq_array, pentry, totallen);
-
 	return 0;
 }
......
@@ -386,7 +386,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
 	unsigned int prev_speed;
 	unsigned int ret = 0;
 	unsigned long flags;
-	struct timeval tv1, tv2;
+	ktime_t tv1, tv2;

 	if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
 		return -EINVAL;
@@ -415,14 +415,14 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,

 	/* start latency measurement */
 	if (transition_latency)
-		do_gettimeofday(&tv1);
+		tv1 = ktime_get();

 	/* switch to high state */
 	set_state(SPEEDSTEP_HIGH);

 	/* end latency measurement */
 	if (transition_latency)
-		do_gettimeofday(&tv2);
+		tv2 = ktime_get();

 	*high_speed = speedstep_get_frequency(processor);
 	if (!*high_speed) {
@@ -442,8 +442,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
 	set_state(SPEEDSTEP_LOW);

 	if (transition_latency) {
-		*transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC +
-			tv2.tv_usec - tv1.tv_usec;
+		*transition_latency = ktime_to_us(ktime_sub(tv2, tv1));
 		pr_debug("transition latency is %u uSec\n", *transition_latency);

 		/* convert uSec to nSec and add 20% for safety reasons */
......
@@ -1668,7 +1668,6 @@ pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data)

 	switch (val) {
 	case CPUFREQ_ADJUST:
-	case CPUFREQ_INCOMPATIBLE:
 		pr_debug("min dma period: %d ps, "
 			"new clock %d kHz\n", pxafb_display_dma_period(var),
 			policy->max);
......
@@ -1042,7 +1042,6 @@ sa1100fb_freq_policy(struct notifier_block *nb, unsigned long val,

 	switch (val) {
 	case CPUFREQ_ADJUST:
-	case CPUFREQ_INCOMPATIBLE:
 		dev_dbg(fbi->dev, "min dma period: %d ps, "
 			"new clock %d kHz\n", sa1100fb_min_dma_period(fbi),
 			policy->max);
......
@@ -560,11 +560,9 @@ static int __init xen_acpi_processor_init(void)
 	return 0;

 err_unregister:
-	for_each_possible_cpu(i) {
-		struct acpi_processor_performance *perf;
-		perf = per_cpu_ptr(acpi_perf_data, i);
-		acpi_processor_unregister_performance(perf, i);
-	}
+	for_each_possible_cpu(i)
+		acpi_processor_unregister_performance(i);

 err_out:
 	/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
 	free_acpi_perf_data();
@@ -579,11 +577,9 @@ static void __exit xen_acpi_processor_exit(void)
 	kfree(acpi_ids_done);
 	kfree(acpi_id_present);
 	kfree(acpi_id_cst_present);
-	for_each_possible_cpu(i) {
-		struct acpi_processor_performance *perf;
-		perf = per_cpu_ptr(acpi_perf_data, i);
-		acpi_processor_unregister_performance(perf, i);
-	}
+	for_each_possible_cpu(i)
+		acpi_processor_unregister_performance(i);
 	free_acpi_perf_data();
 }
......
@@ -228,10 +228,7 @@ extern int acpi_processor_preregister_performance(struct
 extern int acpi_processor_register_performance(struct acpi_processor_performance
 						*performance, unsigned int cpu);
-extern void acpi_processor_unregister_performance(struct
-						  acpi_processor_performance
-						  *performance,
-						  unsigned int cpu);
+extern void acpi_processor_unregister_performance(unsigned int cpu);

 /* note: this locks both the calling module and the processor module
    if a _PPC object exists, rmmod is disallowed then */
......
@@ -51,11 +51,9 @@ struct cpufreq_cpuinfo {
 	unsigned int		transition_latency;
 };

-struct cpufreq_real_policy {
+struct cpufreq_user_policy {
 	unsigned int		min;    /* in kHz */
 	unsigned int		max;    /* in kHz */
-	unsigned int		policy; /* see above */
-	struct cpufreq_governor	*governor; /* see below */
 };

 struct cpufreq_policy {
@@ -88,7 +86,7 @@ struct cpufreq_policy {
 	struct work_struct	update; /* if update_policy() needs to be
 					 * called, but you're in IRQ context */

-	struct cpufreq_real_policy	user_policy;
+	struct cpufreq_user_policy user_policy;
 	struct cpufreq_frequency_table	*freq_table;

 	struct list_head        policy_list;
@@ -369,11 +367,10 @@ static inline void cpufreq_resume(void) {}
 /* Policy Notifiers  */
 #define CPUFREQ_ADJUST			(0)
-#define CPUFREQ_INCOMPATIBLE		(1)
-#define CPUFREQ_NOTIFY			(2)
-#define CPUFREQ_START			(3)
-#define CPUFREQ_CREATE_POLICY		(4)
-#define CPUFREQ_REMOVE_POLICY		(5)
+#define CPUFREQ_NOTIFY			(1)
+#define CPUFREQ_START			(2)
+#define CPUFREQ_CREATE_POLICY		(3)
+#define CPUFREQ_REMOVE_POLICY		(4)

 #ifdef CONFIG_CPU_FREQ
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
......