Commit 3f5760b9 authored by Linus Torvalds

Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] Make cpufreq suspend code conditional on powerpc.
  [CPUFREQ] Fix a kobject reference bug related to managed CPUs
  [CPUFREQ] Do not set policy for offline cpus
  [CPUFREQ] Fix NULL pointer dereference regression in conservative governor
parents 624720e0 4bc5d341
@@ -858,6 +858,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
                 /* Check for existing affected CPUs.
                  * They may not be aware of it due to CPU Hotplug.
+                 * cpufreq_cpu_put is called when the device is removed
+                 * in __cpufreq_remove_dev()
                  */
                 managed_policy = cpufreq_cpu_get(j);
                 if (unlikely(managed_policy)) {
@@ -884,7 +886,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
                         ret = sysfs_create_link(&sys_dev->kobj,
                                                 &managed_policy->kobj,
                                                 "cpufreq");
-                        if (!ret)
+                        if (ret)
                                 cpufreq_cpu_put(managed_policy);
                         /*
                          * Success. We only needed to be added to the mask.
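
The one-character change above (`if (!ret)` to `if (ret)`) is the whole kobject fix: the old code dropped the reference taken by cpufreq_cpu_get() when sysfs_create_link() succeeded, so nothing held the managed policy for as long as the link existed; now the reference is dropped only on failure and otherwise released later in __cpufreq_remove_dev(), as the new comment in the first hunk says. A minimal userspace sketch of that ownership rule (all names here are toy stand-ins, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a refcounted cpufreq policy (hypothetical type). */
struct policy {
    int refcount;
};

static struct policy *policy_get(struct policy *p)
{
    p->refcount++;
    return p;
}

static void policy_put(struct policy *p)
{
    if (--p->refcount == 0) {
        printf("last reference gone, freeing policy\n");
        free(p);
    }
}

/* Stand-in for sysfs_create_link(): 0 on success, nonzero on failure. */
static int publish_link(int should_fail)
{
    return should_fail ? -1 : 0;
}

/* The rule the fix restores: a successfully published link keeps the
 * reference until the link is torn down; only a failed publish drops
 * it immediately. */
static void add_managed_cpu(struct policy *p, int link_fails)
{
    struct policy *managed_policy = policy_get(p);

    if (publish_link(link_fails))
        policy_put(managed_policy);   /* failure: nobody else will drop it */
    /* success: teardown (the __cpufreq_remove_dev() analogue) drops it later */
}

int main(void)
{
    struct policy *p = malloc(sizeof(*p));

    p->refcount = 1;                  /* reference held by the policy owner */
    add_managed_cpu(p, 0);            /* success path */
    printf("refcount after successful link: %d\n", p->refcount);  /* 2, not 1 */

    policy_put(p);                    /* link teardown drops its reference */
    policy_put(p);                    /* owner drops the last one: freed */
    return 0;
}
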
@@ -924,6 +926,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
         spin_lock_irqsave(&cpufreq_driver_lock, flags);
         for_each_cpu(j, policy->cpus) {
+                if (!cpu_online(j))
+                        continue;
                 per_cpu(cpufreq_cpu_data, j) = policy;
                 per_cpu(policy_cpu, j) = policy->cpu;
         }
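
This hunk is the "do not set policy for offline cpus" fix: policy->cpus can contain related CPUs that are not currently online, and recording the policy pointer for them would leave per-CPU data pointing at a policy those CPUs never adopted. A toy sketch of the same guard (hypothetical arrays standing in for per_cpu() and cpu_online()):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct policy { int cpu; };

/* Hypothetical stand-ins for cpu_online() and per_cpu(cpufreq_cpu_data). */
static bool cpu_online[NR_CPUS] = { true, true, false, true };  /* cpu 2 is offline */
static struct policy *cpufreq_cpu_data[NR_CPUS];

static void record_policy(struct policy *policy, const bool *related)
{
    for (int j = 0; j < NR_CPUS; j++) {
        if (!related[j])
            continue;
        if (!cpu_online[j])       /* the fix: skip CPUs that are offline */
            continue;
        cpufreq_cpu_data[j] = policy;
    }
}

int main(void)
{
    struct policy p = { .cpu = 0 };
    bool related[NR_CPUS] = { true, true, true, false };  /* cpus 0-2 share p */

    record_policy(&p, related);
    for (int j = 0; j < NR_CPUS; j++)
        printf("cpu %d: %s\n", j, cpufreq_cpu_data[j] ? "policy set" : "no policy");
    return 0;
}
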
@@ -1244,13 +1248,22 @@ EXPORT_SYMBOL(cpufreq_get);
 static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 {
-        int cpu = sysdev->id;
         int ret = 0;
+
+#ifdef __powerpc__
+        int cpu = sysdev->id;
         unsigned int cur_freq = 0;
         struct cpufreq_policy *cpu_policy;
 
         dprintk("suspending cpu %u\n", cpu);
 
+        /*
+         * This whole bogosity is here because Powerbooks are made of fail.
+         * No sane platform should need any of the code below to be run.
+         * (it's entirely the wrong thing to do, as driver->get may
+         *  reenable interrupts on some architectures).
+         */
+
         if (!cpu_online(cpu))
                 return 0;
@@ -1309,6 +1322,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 out:
         cpufreq_cpu_put(cpu_policy);
+#endif        /* __powerpc__ */
         return ret;
 }
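
Together with the matching resume hunks below, this makes the Powerbook-only fixup compile only on powerpc; per the added comment, driver->get may re-enable interrupts, so the code must not run on other architectures, where the hooks now reduce to "return ret;". A stripped-down sketch of that #ifdef shape (illustrative names, not the kernel functions):

#include <stdio.h>

/* Sketch of the new shape of cpufreq_suspend()/cpufreq_resume(): the
 * workaround body exists only on powerpc; everywhere else the hook is
 * effectively "return 0". All names here are illustrative. */
static int suspend_hook(int sysdev_id)
{
    int ret = 0;

#ifdef __powerpc__
    /* The workaround's variables live inside the #ifdef too, so a
     * non-powerpc build never even declares them. */
    int cpu = sysdev_id;
    unsigned int cur_freq = 0;      /* would come from driver->get(cpu) */

    printf("powerpc: fixing up cpu %d (last known freq %u kHz)\n",
           cpu, cur_freq);
#else
    (void)sysdev_id;                /* unused outside the powerpc-only path */
#endif
    return ret;
}

int main(void)
{
    return suspend_hook(0);
}
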
@@ -1322,12 +1336,18 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
  */
 static int cpufreq_resume(struct sys_device *sysdev)
 {
-        int cpu = sysdev->id;
         int ret = 0;
+
+#ifdef __powerpc__
+        int cpu = sysdev->id;
         struct cpufreq_policy *cpu_policy;
 
         dprintk("resuming cpu %u\n", cpu);
 
+        /* As with the ->suspend method, all the code below is
+         * only necessary because Powerbooks suck.
+         * See commit 42d4dc3f4e1e for jokes. */
+
         if (!cpu_online(cpu))
                 return 0;
@@ -1391,6 +1411,7 @@ static int cpufreq_resume(struct sys_device *sysdev)
         schedule_work(&cpu_policy->update);
 
 fail:
         cpufreq_cpu_put(cpu_policy);
+#endif        /* __powerpc__ */
         return ret;
 }
...
@@ -63,6 +63,7 @@ struct cpu_dbs_info_s {
         unsigned int down_skip;
         unsigned int requested_freq;
         int cpu;
+        unsigned int enable:1;
         /*
          * percpu mutex that serializes governor limit change with
          * do_dbs_timer invocation. We do not want do_dbs_timer to run
@@ -141,6 +142,9 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
         struct cpufreq_policy *policy;
 
+        if (!this_dbs_info->enable)
+                return 0;
+
         policy = this_dbs_info->cur_policy;
 
         /*
@@ -497,6 +501,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
         int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
         delay -= jiffies % delay;
 
+        dbs_info->enable = 1;
         INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
         queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
                                 delay);
@@ -504,6 +509,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
+        dbs_info->enable = 0;
         cancel_delayed_work_sync(&dbs_info->work);
 }
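
These three conservative-governor hunks are the NULL-dereference fix: the cpufreq transition notifier (dbs_cpufreq_notifier) can still fire for a CPU whose governor has already been stopped, at which point cur_policy must not be touched, so an enable bit is set when the governor's timer starts and cleared before it is torn down. A single-threaded userspace sketch of the guard (toy types and names; the real race is against the notifier chain):

#include <stddef.h>
#include <stdio.h>

struct policy { unsigned int cur; };

/* Toy version of struct cpu_dbs_info_s: just the fields the fix touches. */
struct dbs_info {
    struct policy *cur_policy;
    unsigned int enable:1;
};

/* Toy version of dbs_cpufreq_notifier(): must not touch cur_policy
 * unless the governor is actually running on this CPU. */
static int freq_notifier(struct dbs_info *info, unsigned int new_freq)
{
    if (!info->enable)
        return 0;                 /* governor stopped: nothing to adjust */

    info->cur_policy->cur = new_freq;
    return 0;
}

static void timer_init(struct dbs_info *info, struct policy *p)
{
    info->cur_policy = p;
    info->enable = 1;             /* from now on the notifier may run */
}

static void timer_exit(struct dbs_info *info)
{
    info->enable = 0;             /* clear first, then tear the rest down */
    info->cur_policy = NULL;
}

int main(void)
{
    struct policy p = { .cur = 800000 };
    struct dbs_info info = { 0 };

    timer_init(&info, &p);
    freq_notifier(&info, 1000000);    /* normal case: policy updated */
    printf("while enabled: cur = %u\n", p.cur);

    timer_exit(&info);
    freq_notifier(&info, 600000);     /* late notification: safely ignored */
    printf("after exit:    cur = %u\n", p.cur);
    return 0;
}
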
...