Commit 69f8947b authored by Rafael J. Wysocki

Merge branches 'pm-cpufreq' and 'pm-cpuidle'

* pm-cpufreq:
  cpufreq: postfix policy directory with the first CPU in related_cpus
  cpufreq: create cpu/cpufreq/policyX directories
  cpufreq: remove cpufreq_sysfs_{create|remove}_file()
  cpufreq: create cpu/cpufreq at boot time
  cpufreq: Use cpumask_copy instead of cpumask_or to copy a mask
  cpufreq: ondemand: Drop unnecessary locks from update_sampling_rate()
  cpufreq: intel_pstate: Fix intel_pstate powersave min_perf_pct value
  cpufreq: intel_pstate: Avoid calculation for max/min
  Documentation: kernel_parameters for Intel P state driver
  cpufreq: intel_pstate: Use ACPI perf configuration
  cpufreq: intel-pstate: Use separate max pstate for scaling
  cpufreq: intel_pstate: get P1 from TAR when available
  cpufreq: Drop redundant check for inactive policies
  cpufreq: powernv: Report Pmax throttling if capped below nominal frequency
  cpufreq: imx: update the clock switch flow to support imx6ul
  cpufreq: tegra20: remove superfluous CONFIG_PM ifdefs
  cpufreq: conservative: remove 'enable' field
  cpufreq: integrator: Fix module autoload for OF platform driver

* pm-cpuidle:
  cpuidle: mvebu: disable the bind/unbind attributes and use builtin_platform_driver
  cpuidle: mvebu: clean up multiple platform drivers
Documentation/kernel-parameters.txt
@@ -1547,6 +1547,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 		hwp_only
 			Only load intel_pstate on systems which support
 			hardware P state control (HWP) if available.
+		no_acpi
+			Don't use ACPI processor performance control objects
+			_PSS and _PPC specified limits.
 
 	intremap=	[X86-64, Intel-IOMMU]
 			on	enable Interrupt Remapping (default)
......
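Note: the no_acpi flag above is consumed by intel_pstate's early parameter handler. As a hedged sketch of how such boot flags are typically wired up — the flag names match the documentation, but the parser below is illustrative rather than the driver's exact code:

#include <linux/init.h>
#include <linux/string.h>

/* Illustrative variables; the real driver keeps its own flags. */
static int no_acpi __initdata;
static int hwp_only __initdata;

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "no_acpi"))
		no_acpi = 1;	/* ignore ACPI _PSS/_PPC limits */
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;	/* only load when HWP is available */

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);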
arch/x86/include/asm/msr-index.h
@@ -206,6 +206,13 @@
 #define MSR_GFX_PERF_LIMIT_REASONS	0x000006B0
 #define MSR_RING_PERF_LIMIT_REASONS	0x000006B1
 
+/* Config TDP MSRs */
+#define MSR_CONFIG_TDP_NOMINAL		0x00000648
+#define MSR_CONFIG_TDP_LEVEL1		0x00000649
+#define MSR_CONFIG_TDP_LEVEL2		0x0000064A
+#define MSR_CONFIG_TDP_CONTROL		0x0000064B
+#define MSR_TURBO_ACTIVATION_RATIO	0x0000064C
+
 /* Hardware P state interface */
 #define MSR_PPERF			0x0000064e
 #define MSR_PERF_LIMIT_REASONS		0x0000064f
......
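Note: these registers back the "get P1 from TAR when available" change in the shortlog. A hedged sketch of how a driver might use them to derive the maximum non-turbo P-state when a configurable-TDP level is active — simplified, with the real driver's extra sanity checks omitted:

#include <asm/msr.h>
#include <asm/msr-index.h>

static int guess_max_pstate(void)
{
	u64 plat_info, tdp_ctrl, tdp_ratio, tar;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);

	/* Bits 1:0 of TDP_CONTROL select the active Config TDP level */
	if (rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl))
		goto nominal;

	/* The per-level ratio MSRs follow MSR_CONFIG_TDP_NOMINAL */
	if (rdmsrl_safe(MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3), &tdp_ratio))
		goto nominal;

	if (!rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar)) {
		u64 tar_ratio = tar & 0xff;

		/* TAR holds the last non-turbo ratio; cross-check it */
		if (tdp_ratio - 1 == tar_ratio)
			return tar_ratio + 1;
	}

nominal:
	/* Fall back to the nominal P1 from PLATFORM_INFO[15:8] */
	return (plat_info >> 8) & 0xff;
}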
drivers/cpufreq/Kconfig.x86
@@ -5,6 +5,7 @@
 config X86_INTEL_PSTATE
 	bool "Intel P state control"
 	depends on X86
+	select ACPI_PROCESSOR if ACPI
 	help
 	  This driver provides a P state for Intel core processors.
 	  The driver implements an internal governor and will become
......
drivers/cpufreq/cpufreq.c
@@ -843,18 +843,11 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 	down_write(&policy->rwsem);
 
-	/* Updating inactive policies is invalid, so avoid doing that. */
-	if (unlikely(policy_is_inactive(policy))) {
-		ret = -EBUSY;
-		goto unlock_policy_rwsem;
-	}
-
 	if (fattr->store)
 		ret = fattr->store(policy, buf, count);
 	else
 		ret = -EIO;
 
-unlock_policy_rwsem:
 	up_write(&policy->rwsem);
 unlock:
 	put_online_cpus();
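Note: the dropped check was redundant because a policy's sysfs attributes are removed before the policy is marked inactive, so store() can never run against an inactive policy. Reconstructed from the hunk above, the resulting function is approximately (to_policy()/to_attr() are the container_of helpers defined in the same file):

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	down_write(&policy->rwsem);

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	up_write(&policy->rwsem);
unlock:
	put_online_cpus();

	return ret;
}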
@@ -880,49 +873,6 @@ static struct kobj_type ktype_cpufreq = {
 	.release	= cpufreq_sysfs_release,
 };
 
-struct kobject *cpufreq_global_kobject;
-EXPORT_SYMBOL(cpufreq_global_kobject);
-
-static int cpufreq_global_kobject_usage;
-
-int cpufreq_get_global_kobject(void)
-{
-	if (!cpufreq_global_kobject_usage++)
-		return kobject_add(cpufreq_global_kobject,
-				   &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
-
-	return 0;
-}
-EXPORT_SYMBOL(cpufreq_get_global_kobject);
-
-void cpufreq_put_global_kobject(void)
-{
-	if (!--cpufreq_global_kobject_usage)
-		kobject_del(cpufreq_global_kobject);
-}
-EXPORT_SYMBOL(cpufreq_put_global_kobject);
-
-int cpufreq_sysfs_create_file(const struct attribute *attr)
-{
-	int ret = cpufreq_get_global_kobject();
-
-	if (!ret) {
-		ret = sysfs_create_file(cpufreq_global_kobject, attr);
-		if (ret)
-			cpufreq_put_global_kobject();
-	}
-
-	return ret;
-}
-EXPORT_SYMBOL(cpufreq_sysfs_create_file);
-
-void cpufreq_sysfs_remove_file(const struct attribute *attr)
-{
-	sysfs_remove_file(cpufreq_global_kobject, attr);
-	cpufreq_put_global_kobject();
-}
-EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
-
 static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
 {
 	struct device *cpu_dev;
@@ -960,9 +910,6 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
 	/* Some related CPUs might not be present (physically hotplugged) */
 	for_each_cpu(j, policy->real_cpus) {
-		if (j == policy->kobj_cpu)
-			continue;
-
 		ret = add_cpu_dev_symlink(policy, j);
 		if (ret)
 			break;
@@ -976,12 +923,8 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
 	unsigned int j;
 
 	/* Some related CPUs might not be present (physically hotplugged) */
-	for_each_cpu(j, policy->real_cpus) {
-		if (j == policy->kobj_cpu)
-			continue;
-
+	for_each_cpu(j, policy->real_cpus)
 		remove_cpu_dev_symlink(policy, j);
-	}
 }
 
 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
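Note: with the kobj_cpu special case gone, every CPU in policy->real_cpus gets an ordinary symlink to the shared policy directory. The helper these loops call looks roughly like this — simplified, debug printouts omitted:

static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return 0;

	/* /sys/devices/system/cpu/cpuN/cpufreq -> .../cpufreq/policyX */
	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
}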
@@ -1079,7 +1022,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 {
 	struct device *dev = get_cpu_device(cpu);
 	struct cpufreq_policy *policy;
-	int ret;
 
 	if (WARN_ON(!dev))
 		return NULL;
@@ -1097,13 +1039,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
 		goto err_free_rcpumask;
 
-	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
-				   "cpufreq");
-	if (ret) {
-		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
-		goto err_free_real_cpus;
-	}
-
+	kobject_init(&policy->kobj, &ktype_cpufreq);
 	INIT_LIST_HEAD(&policy->policy_list);
 	init_rwsem(&policy->rwsem);
 	spin_lock_init(&policy->transition_lock);
@@ -1112,14 +1048,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 	INIT_WORK(&policy->update, handle_update);
 
 	policy->cpu = cpu;
-
-	/* Set this once on allocation */
-	policy->kobj_cpu = cpu;
-
 	return policy;
 
-err_free_real_cpus:
-	free_cpumask_var(policy->real_cpus);
 err_free_rcpumask:
 	free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
@@ -1221,9 +1151,19 @@ static int cpufreq_online(unsigned int cpu)
 	if (new_policy) {
 		/* related_cpus should at least include policy->cpus. */
-		cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+		cpumask_copy(policy->related_cpus, policy->cpus);
 
 		/* Remember CPUs present at the policy creation time. */
 		cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+
+		/* Name and add the kobject */
+		ret = kobject_add(&policy->kobj, cpufreq_global_kobject,
+				  "policy%u",
+				  cpumask_first(policy->related_cpus));
+		if (ret) {
+			pr_err("%s: failed to add policy->kobj: %d\n", __func__,
+			       ret);
+			goto out_exit_policy;
+		}
 	}
 
 	/*
@@ -1467,22 +1407,7 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 		return;
 	}
 
-	if (cpu != policy->kobj_cpu) {
-		remove_cpu_dev_symlink(policy, cpu);
-	} else {
-		/*
-		 * The CPU owning the policy object is going away.  Move it to
-		 * another suitable CPU.
-		 */
-		unsigned int new_cpu = cpumask_first(policy->real_cpus);
-		struct device *new_dev = get_cpu_device(new_cpu);
-
-		dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
-
-		sysfs_remove_link(&new_dev->kobj, "cpufreq");
-		policy->kobj_cpu = new_cpu;
-		WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
-	}
+	remove_cpu_dev_symlink(policy, cpu);
 }
 
 static void handle_update(struct work_struct *work)
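Note: because the policy kobject now lives under the global cpufreq directory as policyX (named after the first CPU in related_cpus) rather than inside one CPU's sysfs directory, removing a CPU never forces a kobject_move(); it only drops that CPU's symlink. Roughly, simplified:

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return;

	/* Drop cpuN/cpufreq; the policyX directory itself stays put */
	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
}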
@@ -2425,7 +2350,7 @@ static int create_boost_sysfs_file(void)
 	if (!cpufreq_driver->set_boost)
 		cpufreq_driver->set_boost = cpufreq_boost_set_sw;
 
-	ret = cpufreq_sysfs_create_file(&boost.attr);
+	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
 	if (ret)
 		pr_err("%s: cannot register global BOOST sysfs file\n",
 		       __func__);
@@ -2436,7 +2361,7 @@ static int create_boost_sysfs_file(void)
 static void remove_boost_sysfs_file(void)
 {
 	if (cpufreq_boost_supported())
-		cpufreq_sysfs_remove_file(&boost.attr);
+		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
 }
 
 int cpufreq_enable_boost_support(void)
@@ -2584,12 +2509,15 @@ static struct syscore_ops cpufreq_syscore_ops = {
 	.shutdown = cpufreq_suspend,
 };
 
+struct kobject *cpufreq_global_kobject;
+EXPORT_SYMBOL(cpufreq_global_kobject);
+
 static int __init cpufreq_core_init(void)
 {
 	if (cpufreq_disabled())
 		return -ENODEV;
 
-	cpufreq_global_kobject = kobject_create();
+	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
 	BUG_ON(!cpufreq_global_kobject);
 
 	register_syscore_ops(&cpufreq_syscore_ops);
......
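Note: from this point /sys/devices/system/cpu/cpufreq exists from boot, which is what lets the boost hunks above call sysfs_create_file()/sysfs_remove_file() directly instead of the refcounted wrappers. A hedged sketch of that pattern — the attribute here is purely illustrative, not a real cpufreq file:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "42\n");
}

static struct kobj_attribute example_attr = __ATTR_RO(example);

static int __init example_init(void)
{
	/* cpufreq_global_kobject is valid from early boot onwards */
	return sysfs_create_file(cpufreq_global_kobject, &example_attr.attr);
}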
drivers/cpufreq/cpufreq_conservative.c
@@ -23,6 +23,19 @@
 static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
 
+static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
+				   unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_conservative = {
+	.name			= "conservative",
+	.governor		= cs_cpufreq_governor_dbs,
+	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
+	.owner			= THIS_MODULE,
+};
+
 static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
 					   struct cpufreq_policy *policy)
 {
@@ -119,12 +132,14 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	struct cpufreq_freqs *freq = data;
 	struct cs_cpu_dbs_info_s *dbs_info =
 					&per_cpu(cs_cpu_dbs_info, freq->cpu);
-	struct cpufreq_policy *policy;
+	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
 
-	if (!dbs_info->enable)
+	if (!policy)
 		return 0;
 
-	policy = dbs_info->cdbs.shared->policy;
+	/* policy isn't governed by conservative governor */
+	if (policy->governor != &cpufreq_gov_conservative)
+		return 0;
 
 	/*
 	 * we only care if our internally tracked freq moves outside the 'valid'
@@ -367,16 +382,6 @@ static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
 }
 
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_conservative = {
-	.name			= "conservative",
-	.governor		= cs_cpufreq_governor_dbs,
-	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
-	.owner			= THIS_MODULE,
-};
-
 static int __init cpufreq_gov_dbs_init(void)
 {
 	return cpufreq_register_governor(&cpufreq_gov_conservative);
......
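Note: the per-CPU enable flag is replaced by a policy check. cpufreq_cpu_get_raw() returns the CPU's policy without taking a reference — acceptable in this atomic notifier, so no put is needed — and the notifier bails out unless the policy is actually governed by conservative (which is why the governor struct had to move above this function). Reconstructed from the hunks above, the notifier's resulting shape is approximately:

static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cs_cpu_dbs_info_s *dbs_info =
					&per_cpu(cs_cpu_dbs_info, freq->cpu);
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);

	if (!policy)
		return 0;

	/* policy isn't governed by conservative governor */
	if (policy->governor != &cpufreq_gov_conservative)
		return 0;

	/* keep the internally tracked frequency inside the policy limits */
	if (dbs_info->requested_freq > policy->max
	    || dbs_info->requested_freq < policy->min)
		dbs_info->requested_freq = freq->new;

	return 0;
}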
drivers/cpufreq/cpufreq_governor.c
@@ -348,29 +348,21 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
 	set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
 					latency * LATENCY_MULTIPLIER));
 
-	if (!have_governor_per_policy()) {
-		if (WARN_ON(cpufreq_get_global_kobject())) {
-			ret = -EINVAL;
-			goto cdata_exit;
-		}
+	if (!have_governor_per_policy())
 		cdata->gdbs_data = dbs_data;
-	}
 
 	ret = sysfs_create_group(get_governor_parent_kobj(policy),
 				 get_sysfs_attr(dbs_data));
 	if (ret)
-		goto put_kobj;
+		goto reset_gdbs_data;
 
 	policy->governor_data = dbs_data;
 
 	return 0;
 
-put_kobj:
-	if (!have_governor_per_policy()) {
+reset_gdbs_data:
+	if (!have_governor_per_policy())
 		cdata->gdbs_data = NULL;
-		cpufreq_put_global_kobject();
-	}
-cdata_exit:
 	cdata->exit(dbs_data, !policy->governor->initialized);
 free_common_dbs_info:
 	free_common_dbs_info(policy, cdata);
@@ -394,10 +386,8 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
 	sysfs_remove_group(get_governor_parent_kobj(policy),
 			   get_sysfs_attr(dbs_data));
 
-	if (!have_governor_per_policy()) {
+	if (!have_governor_per_policy())
 		cdata->gdbs_data = NULL;
-		cpufreq_put_global_kobject();
-	}
 
 	cdata->exit(dbs_data, policy->governor->initialized == 1);
 	kfree(dbs_data);
@@ -463,7 +453,6 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
 			cdata->get_cpu_dbs_info_s(cpu);
 
 		cs_dbs_info->down_skip = 0;
-		cs_dbs_info->enable = 1;
 		cs_dbs_info->requested_freq = policy->cur;
 	} else {
 		struct od_ops *od_ops = cdata->gov_ops;
@@ -482,9 +471,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
 static int cpufreq_governor_stop(struct cpufreq_policy *policy,
 				 struct dbs_data *dbs_data)
 {
-	struct common_dbs_data *cdata = dbs_data->cdata;
-	unsigned int cpu = policy->cpu;
-	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(policy->cpu);
 	struct cpu_common_dbs_info *shared = cdbs->shared;
 
 	/* State should be equivalent to START */
@@ -493,13 +480,6 @@ static int cpufreq_governor_stop(struct cpufreq_policy *policy,
 	gov_cancel_work(dbs_data, policy);
 
-	if (cdata->governor == GOV_CONSERVATIVE) {
-		struct cs_cpu_dbs_info_s *cs_dbs_info =
-			cdata->get_cpu_dbs_info_s(cpu);
-
-		cs_dbs_info->enable = 0;
-	}
-
 	shared->policy = NULL;
 	mutex_destroy(&shared->timer_mutex);
 	return 0;
......
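Note: these error-path simplifications are safe because the governor's sysfs parent no longer needs pinning — the global kobject exists from boot, as set up in the cpufreq_core_init hunk earlier. For reference, the helper used above looks roughly like this (it predates this series and is unchanged by it):

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;	/* per-policy tunables directory */
	else
		return cpufreq_global_kobject;	/* always present now */
}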
drivers/cpufreq/cpufreq_governor.h
@@ -170,7 +170,6 @@ struct cs_cpu_dbs_info_s {
 	struct cpu_dbs_info cdbs;
 	unsigned int down_skip;
 	unsigned int requested_freq;
-	unsigned int enable:1;
 };
 
 /* Per policy Governors sysfs tunables */
......
drivers/cpufreq/cpufreq_ondemand.c
@@ -267,27 +267,19 @@ static void update_sampling_rate(struct dbs_data *dbs_data,
 		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 		cpufreq_cpu_put(policy);
 
-		mutex_lock(&dbs_info->cdbs.shared->timer_mutex);
-
-		if (!delayed_work_pending(&dbs_info->cdbs.dwork)) {
-			mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
+		if (!delayed_work_pending(&dbs_info->cdbs.dwork))
 			continue;
-		}
 
 		next_sampling = jiffies + usecs_to_jiffies(new_rate);
 		appointed_at = dbs_info->cdbs.dwork.timer.expires;
 
 		if (time_before(next_sampling, appointed_at)) {
-			mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
 			cancel_delayed_work_sync(&dbs_info->cdbs.dwork);
-			mutex_lock(&dbs_info->cdbs.shared->timer_mutex);
-
 			gov_queue_work(dbs_data, policy,
 				       usecs_to_jiffies(new_rate), true);
 		}
-
-		mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
 	}
 }
......
drivers/cpufreq/imx6q-cpufreq.c
@@ -30,6 +30,10 @@ static struct clk *pll1_sw_clk;
 static struct clk *step_clk;
 static struct clk *pll2_pfd2_396m_clk;
 
+/* clk used by i.MX6UL */
+static struct clk *pll2_bus_clk;
+static struct clk *secondary_sel_clk;
+
 static struct device *cpu_dev;
 static bool free_opp;
 static struct cpufreq_frequency_table *freq_table;
@@ -91,16 +95,36 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 	 * The setpoints are selected per PLL/PDF frequencies, so we need to
 	 * reprogram PLL for frequency scaling.  The procedure of reprogramming
 	 * PLL1 is as below.
-	 *
+	 * For i.MX6UL, it has a secondary clk mux, the cpu frequency change
+	 * flow is slightly different from other i.MX6 SoCs.
+	 * The cpu frequency change flow for i.MX6 (except i.MX6UL) is as below:
 	 *  - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
 	 *  - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
 	 *  - Disable pll2_pfd2_396m_clk
 	 */
-	clk_set_parent(step_clk, pll2_pfd2_396m_clk);
-	clk_set_parent(pll1_sw_clk, step_clk);
-	if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
-		clk_set_rate(pll1_sys_clk, new_freq * 1000);
+	if (of_machine_is_compatible("fsl,imx6ul")) {
+		/*
+		 * When changing pll1_sw_clk's parent to pll1_sys_clk,
+		 * CPU may run at higher than 528MHz, this will lead to
+		 * the system unstable if the voltage is lower than the
+		 * voltage of 528MHz, so lower the CPU frequency to one
+		 * half before changing CPU frequency.
+		 */
+		clk_set_rate(arm_clk, (old_freq >> 1) * 1000);
 		clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+		if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk))
+			clk_set_parent(secondary_sel_clk, pll2_bus_clk);
+		else
+			clk_set_parent(secondary_sel_clk, pll2_pfd2_396m_clk);
+		clk_set_parent(step_clk, secondary_sel_clk);
+		clk_set_parent(pll1_sw_clk, step_clk);
+	} else {
+		clk_set_parent(step_clk, pll2_pfd2_396m_clk);
+		clk_set_parent(pll1_sw_clk, step_clk);
+		if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
+			clk_set_rate(pll1_sys_clk, new_freq * 1000);
+			clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+		}
 	}
 
 	/* Ensure the arm clock divider is what we expect */
@@ -186,6 +210,16 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
 		goto put_clk;
 	}
 
+	if (of_machine_is_compatible("fsl,imx6ul")) {
+		pll2_bus_clk = clk_get(cpu_dev, "pll2_bus");
+		secondary_sel_clk = clk_get(cpu_dev, "secondary_sel");
+		if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) {
+			dev_err(cpu_dev, "failed to get clocks specific to imx6ul\n");
+			ret = -ENOENT;
+			goto put_clk;
+		}
+	}
+
 	arm_reg = regulator_get(cpu_dev, "arm");
 	pu_reg = regulator_get_optional(cpu_dev, "pu");
 	soc_reg = regulator_get(cpu_dev, "soc");
@@ -331,6 +365,10 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
 	clk_put(step_clk);
 	if (!IS_ERR(pll2_pfd2_396m_clk))
 		clk_put(pll2_pfd2_396m_clk);
+	if (!IS_ERR(pll2_bus_clk))
+		clk_put(pll2_bus_clk);
+	if (!IS_ERR(secondary_sel_clk))
+		clk_put(secondary_sel_clk);
 	of_node_put(np);
 	return ret;
 }
@@ -350,6 +388,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
 	clk_put(pll1_sw_clk);
 	clk_put(step_clk);
 	clk_put(pll2_pfd2_396m_clk);
+	clk_put(pll2_bus_clk);
+	clk_put(secondary_sel_clk);
 	return 0;
 }
......
drivers/cpufreq/integrator-cpufreq.c
@@ -221,6 +221,8 @@ static const struct of_device_id integrator_cpufreq_match[] = {
 	{ },
 };
 
+MODULE_DEVICE_TABLE(of, integrator_cpufreq_match);
+
 static struct platform_driver integrator_cpufreq_driver = {
 	.driver = {
 		.name = "integrator-cpufreq",
......
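Note: MODULE_DEVICE_TABLE(of, ...) embeds the OF match table in the module's alias section so udev/kmod can autoload the module when a matching device-tree node is found; without it, the driver built as a module only binds if loaded by hand — the bug this commit fixes. The usual pattern, shown with an illustrative compatible string rather than the integrator driver's real one:

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id example_cpufreq_match[] = {
	{ .compatible = "vendor,example-cpufreq" },	/* illustrative */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, example_cpufreq_match);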
[One file's diff is collapsed in this view — by position, likely drivers/cpufreq/intel_pstate.c, which carries the intel_pstate changes listed in the shortlog.]
drivers/cpufreq/powernv-cpufreq.c
@@ -327,8 +327,14 @@ static void powernv_cpufreq_throttle_check(void *data)
 		if (chips[i].throttled)
 			goto next;
 		chips[i].throttled = true;
-		pr_info("CPU %d on Chip %u has Pmax reduced to %d\n", cpu,
-			chips[i].id, pmsr_pmax);
+		if (pmsr_pmax < powernv_pstate_info.nominal)
+			pr_crit("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
+				cpu, chips[i].id, pmsr_pmax,
+				powernv_pstate_info.nominal);
+		else
+			pr_info("CPU %d on Chip %u has Pmax reduced below turbo frequency (%d < %d)\n",
+				cpu, chips[i].id, pmsr_pmax,
+				powernv_pstate_info.max);
 	} else if (chips[i].throttled) {
 		chips[i].throttled = false;
 		pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
......
drivers/cpufreq/tegra20-cpufreq.c
@@ -175,9 +175,7 @@ static struct cpufreq_driver tegra_cpufreq_driver = {
 	.exit		= tegra_cpu_exit,
 	.name		= "tegra",
 	.attr		= cpufreq_generic_attr,
-#ifdef CONFIG_PM
 	.suspend	= cpufreq_generic_suspend,
-#endif
 };
 
 static int __init tegra_cpufreq_init(void)
drivers/cpuidle/cpuidle-mvebu-v7.c
@@ -99,44 +99,40 @@ static struct cpuidle_driver armada38x_idle_driver = {
 static int mvebu_v7_cpuidle_probe(struct platform_device *pdev)
 {
-	mvebu_v7_cpu_suspend = pdev->dev.platform_data;
+	const struct platform_device_id *id = pdev->id_entry;
 
-	if (!strcmp(pdev->dev.driver->name, "cpuidle-armada-xp"))
-		return cpuidle_register(&armadaxp_idle_driver, NULL);
-	else if (!strcmp(pdev->dev.driver->name, "cpuidle-armada-370"))
-		return cpuidle_register(&armada370_idle_driver, NULL);
-	else if (!strcmp(pdev->dev.driver->name, "cpuidle-armada-38x"))
-		return cpuidle_register(&armada38x_idle_driver, NULL);
-	else
+	if (!id)
 		return -EINVAL;
-}
 
-static struct platform_driver armadaxp_cpuidle_plat_driver = {
-	.driver = {
-		.name = "cpuidle-armada-xp",
-	},
-	.probe = mvebu_v7_cpuidle_probe,
-};
+	mvebu_v7_cpu_suspend = pdev->dev.platform_data;
 
-module_platform_driver(armadaxp_cpuidle_plat_driver);
+	return cpuidle_register((struct cpuidle_driver *)id->driver_data, NULL);
+}
 
-static struct platform_driver armada370_cpuidle_plat_driver = {
-	.driver = {
+static const struct platform_device_id mvebu_cpuidle_ids[] = {
+	{
+		.name = "cpuidle-armada-xp",
+		.driver_data = (unsigned long)&armadaxp_idle_driver,
+	}, {
 		.name = "cpuidle-armada-370",
+		.driver_data = (unsigned long)&armada370_idle_driver,
+	}, {
+		.name = "cpuidle-armada-38x",
+		.driver_data = (unsigned long)&armada38x_idle_driver,
 	},
-	.probe = mvebu_v7_cpuidle_probe,
+	{}
 };
 
-module_platform_driver(armada370_cpuidle_plat_driver);
-
-static struct platform_driver armada38x_cpuidle_plat_driver = {
+static struct platform_driver mvebu_cpuidle_driver = {
+	.probe = mvebu_v7_cpuidle_probe,
 	.driver = {
-		.name = "cpuidle-armada-38x",
+		.name = "cpuidle-mbevu",
+		.suppress_bind_attrs = true,
 	},
-	.probe = mvebu_v7_cpuidle_probe,
+	.id_table = mvebu_cpuidle_ids,
 };
 
-module_platform_driver(armada38x_cpuidle_plat_driver);
+builtin_platform_driver(mvebu_cpuidle_driver);
 
 MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
 MODULE_DESCRIPTION("Marvell EBU v7 cpuidle driver");
......
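Note: the consolidation keys each platform_device_id entry to its cpuidle driver through driver_data, so probe no longer string-compares driver names; suppress_bind_attrs hides the sysfs bind/unbind files and builtin_platform_driver() registers the driver without module-unload support, matching the two commits in the shortlog. Reconstructed from the hunk above, the resulting probe is approximately:

static int mvebu_v7_cpuidle_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id = pdev->id_entry;

	if (!id)
		return -EINVAL;

	mvebu_v7_cpu_suspend = pdev->dev.platform_data;

	/* Each id entry carries its cpuidle_driver in driver_data */
	return cpuidle_register((struct cpuidle_driver *)id->driver_data,
				NULL);
}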
include/linux/cpufreq.h
@@ -65,7 +65,6 @@ struct cpufreq_policy {
 	unsigned int		shared_type;	/* ACPI: ANY or ALL affected CPUs
 						   should set cpufreq */
 	unsigned int		cpu;		/* cpu managing this policy, must be online */
-	unsigned int		kobj_cpu;	/* cpu managing sysfs files, can be offline */
 
 	struct clk		*clk;
 	struct cpufreq_cpuinfo	cpuinfo;	/* see above */
@@ -149,10 +148,6 @@ static inline bool policy_is_shared(struct cpufreq_policy *policy)
 /* /sys/devices/system/cpu/cpufreq: entry point for global variables */
 extern struct kobject *cpufreq_global_kobject;
-int cpufreq_get_global_kobject(void);
-void cpufreq_put_global_kobject(void);
-int cpufreq_sysfs_create_file(const struct attribute *attr);
-void cpufreq_sysfs_remove_file(const struct attribute *attr);
 
 #ifdef CONFIG_CPU_FREQ
 unsigned int cpufreq_get(unsigned int cpu);
......