Commit b4b21cac authored by Linus Torvalds

Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] Powernow-k8: support family 0xf with 2 low p-states
  [CPUFREQ] fix (utter) cpufreq_add_dev mess
  [CPUFREQ] Cleanup locking in conservative governor
  [CPUFREQ] Cleanup locking in ondemand governor
  [CPUFREQ] Mark policy_rwsem as going static in cpufreq.c, won't be exported
  [CPUFREQ] Eliminate the recent lockdep warnings in cpufreq
parents 728b690f a2e1b4c3
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -458,3 +458,13 @@ Why:	Remove the old legacy 32bit machine check code. This has been
 	but the old version has been kept around for easier testing. Note this
 	doesn't impact the old P5 and WinChip machine check handlers.
 Who:	Andi Kleen <andi@firstfloor.org>
+
+----------------------------
+
+What:	lock_policy_rwsem_* and unlock_policy_rwsem_* will not be
+	exported interface anymore.
+When:	2.6.33
+Why:	cpu_policy_rwsem has a new cleaner definition making it local to
+	cpufreq core and contained inside cpufreq.c. Other dependent
+	drivers should not use it in order to safely avoid lockdep issues.
+Who:	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
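
For illustration, a minimal sketch of the usage pattern this entry deprecates; my_driver_poke() is a hypothetical out-of-tree driver, not code from this tree:

	/* Hypothetical driver; after 2.6.33 these helpers are private to cpufreq.c. */
	static void my_driver_poke(unsigned int cpu)
	{
		if (lock_policy_rwsem_write(cpu) < 0)	/* exported today, going static */
			return;
		/* ... touch per-policy state ... */
		unlock_policy_rwsem_write(cpu);
	}

Such drivers should carry their own locks instead of reaching into the cpufreq core's policy rwsem.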
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -299,7 +299,7 @@ static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
 static int transition_fid_vid(struct powernow_k8_data *data,
 		u32 reqfid, u32 reqvid)
 {
-	if (core_voltage_pre_transition(data, reqvid))
+	if (core_voltage_pre_transition(data, reqvid, reqfid))
 		return 1;

 	if (core_frequency_transition(data, reqfid))
@@ -327,17 +327,20 @@ static int transition_fid_vid(struct powernow_k8_data *data,

 /* Phase 1 - core voltage transition ... setup voltage */
 static int core_voltage_pre_transition(struct powernow_k8_data *data,
-		u32 reqvid)
+		u32 reqvid, u32 reqfid)
 {
 	u32 rvosteps = data->rvo;
 	u32 savefid = data->currfid;
-	u32 maxvid, lo;
+	u32 maxvid, lo, rvomult = 1;

 	dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, "
 		"reqvid 0x%x, rvo 0x%x\n",
 		smp_processor_id(),
 		data->currfid, data->currvid, reqvid, data->rvo);

+	if ((savefid < LO_FID_TABLE_TOP) && (reqfid < LO_FID_TABLE_TOP))
+		rvomult = 2;
+	rvosteps *= rvomult;
 	rdmsr(MSR_FIDVID_STATUS, lo, maxvid);
 	maxvid = 0x1f & (maxvid >> 16);
 	dprintk("ph1 maxvid=0x%x\n", maxvid);
@@ -351,7 +354,8 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data,
 		return 1;
 	}

-	while ((rvosteps > 0) && ((data->rvo + data->currvid) > reqvid)) {
+	while ((rvosteps > 0) &&
+			((rvomult * data->rvo + data->currvid) > reqvid)) {
 		if (data->currvid == maxvid) {
 			rvosteps = 0;
 		} else {
@@ -384,13 +388,6 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
 	u32 vcoreqfid, vcocurrfid, vcofiddiff;
 	u32 fid_interval, savevid = data->currvid;

-	if ((reqfid < HI_FID_TABLE_BOTTOM) &&
-	    (data->currfid < HI_FID_TABLE_BOTTOM)) {
-		printk(KERN_ERR PFX "ph2: illegal lo-lo transition "
-				"0x%x 0x%x\n", reqfid, data->currfid);
-		return 1;
-	}
-
 	if (data->currfid == reqfid) {
 		printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n",
 			data->currfid);
@@ -407,6 +404,9 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
 	vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
 			: vcoreqfid - vcocurrfid;

+	if ((reqfid <= LO_FID_TABLE_TOP) && (data->currfid <= LO_FID_TABLE_TOP))
+		vcofiddiff = 0;
+
 	while (vcofiddiff > 2) {
 		(data->currfid & 1) ? (fid_interval = 1) : (fid_interval = 2);

@@ -1081,14 +1081,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
 		return 0;
 	}

-	if ((fid < HI_FID_TABLE_BOTTOM) &&
-	    (data->currfid < HI_FID_TABLE_BOTTOM)) {
-		printk(KERN_ERR PFX
-		       "ignoring illegal change in lo freq table-%x to 0x%x\n",
-		       data->currfid, fid);
-		return 1;
-	}
-
 	dprintk("cpu %d, changing to fid 0x%x, vid 0x%x\n",
 		smp_processor_id(), fid, vid);
 	freqs.old = find_khz_freq_from_fid(data->currfid);
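Taken together, the powernow-k8 hunks replace the old ban on lo-lo fid transitions (the removed HI_FID_TABLE_BOTTOM checks) with two adjustments: the ramp voltage offset (RVO) is doubled when both the current and requested fid sit below LO_FID_TABLE_TOP, and the VCO fid stepping is skipped for such transitions. A sketch of the new phase-1 arithmetic, using a hypothetical rvo of 1 and fids assumed to lie below LO_FID_TABLE_TOP:

	/* Hypothetical values, illustrating the lo-lo path only. */
	u32 rvo = 1;			/* data->rvo as read from the BIOS tables */
	u32 rvosteps = rvo, rvomult = 1;

	if ((savefid < LO_FID_TABLE_TOP) && (reqfid < LO_FID_TABLE_TOP))
		rvomult = 2;		/* lo-lo transition: double the offset */
	rvosteps *= rvomult;		/* budget of 2 vid steps instead of 1 */

The phase-1 loop then keeps stepping the vid while (rvomult * rvo + data->currvid) > reqvid, i.e. it overshoots the requested vid by the doubled offset before the frequency change, letting family 0xf parts with two low p-states switch between them without tripping the old "illegal lo-lo transition" error.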
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -215,7 +215,8 @@ struct pst_s {

 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k8", msg)

-static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid);
+static int core_voltage_pre_transition(struct powernow_k8_data *data,
+		u32 reqvid, u32 regfid);
 static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid);
 static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -761,6 +761,10 @@ static struct kobj_type ktype_cpufreq = {
  * cpufreq_add_dev - add a CPU device
  *
  * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
  */
 static int cpufreq_add_dev(struct sys_device *sys_dev)
 {
@@ -804,15 +808,12 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 		goto nomem_out;
 	}
 	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
-		kfree(policy);
 		ret = -ENOMEM;
-		goto nomem_out;
+		goto err_free_policy;
 	}
 	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
-		free_cpumask_var(policy->cpus);
-		kfree(policy);
 		ret = -ENOMEM;
-		goto nomem_out;
+		goto err_free_cpumask;
 	}

 	policy->cpu = cpu;
@@ -820,7 +821,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)

 	/* Initially set CPU itself as the policy_cpu */
 	per_cpu(policy_cpu, cpu) = cpu;
-	lock_policy_rwsem_write(cpu);
+	ret = (lock_policy_rwsem_write(cpu) < 0);
+	WARN_ON(ret);

 	init_completion(&policy->kobj_unregister);
 	INIT_WORK(&policy->update, handle_update);
@@ -833,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	ret = cpufreq_driver->init(policy);
 	if (ret) {
 		dprintk("initialization failed\n");
-		goto err_out;
+		goto err_unlock_policy;
 	}
 	policy->user_policy.min = policy->min;
 	policy->user_policy.max = policy->max;
@@ -858,15 +860,21 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 		/* Check for existing affected CPUs.
 		 * They may not be aware of it due to CPU Hotplug.
 		 */
-		managed_policy = cpufreq_cpu_get(j); /* FIXME: Where is this released? What about error paths? */
+		managed_policy = cpufreq_cpu_get(j);
 		if (unlikely(managed_policy)) {

 			/* Set proper policy_cpu */
 			unlock_policy_rwsem_write(cpu);
 			per_cpu(policy_cpu, cpu) = managed_policy->cpu;

-			if (lock_policy_rwsem_write(cpu) < 0)
-				goto err_out_driver_exit;
+			if (lock_policy_rwsem_write(cpu) < 0) {
+				/* Should not go through policy unlock path */
+				if (cpufreq_driver->exit)
+					cpufreq_driver->exit(policy);
+				ret = -EBUSY;
+				cpufreq_cpu_put(managed_policy);
+				goto err_free_cpumask;
+			}

 			spin_lock_irqsave(&cpufreq_driver_lock, flags);
 			cpumask_copy(managed_policy->cpus, policy->cpus);
@@ -877,12 +885,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 			ret = sysfs_create_link(&sys_dev->kobj,
 						&managed_policy->kobj,
 						"cpufreq");
-			if (ret)
-				goto err_out_driver_exit;
-
-			cpufreq_debug_enable_ratelimit();
-			ret = 0;
-			goto err_out_driver_exit; /* call driver->exit() */
+			if (!ret)
+				cpufreq_cpu_put(managed_policy);
+			/*
+			 * Success. We only needed to be added to the mask.
+			 * Call driver->exit() because only the cpu parent of
+			 * the kobj needed to call init().
+			 */
+			goto out_driver_exit; /* call driver->exit() */
 		}
 	}
 #endif
@@ -892,25 +902,25 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
 				   "cpufreq");
 	if (ret)
-		goto err_out_driver_exit;
+		goto out_driver_exit;

 	/* set up files for this cpu device */
 	drv_attr = cpufreq_driver->attr;
 	while ((drv_attr) && (*drv_attr)) {
 		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
 		if (ret)
-			goto err_out_driver_exit;
+			goto err_out_kobj_put;
 		drv_attr++;
 	}
 	if (cpufreq_driver->get) {
 		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
 		if (ret)
-			goto err_out_driver_exit;
+			goto err_out_kobj_put;
 	}
 	if (cpufreq_driver->target) {
 		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
 		if (ret)
-			goto err_out_driver_exit;
+			goto err_out_kobj_put;
 	}

 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -928,13 +938,15 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 			continue;

 		dprintk("CPU %u already managed, adding link\n", j);
-		cpufreq_cpu_get(cpu);
+		managed_policy = cpufreq_cpu_get(cpu);
 		cpu_sys_dev = get_cpu_sysdev(j);
 		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
 					"cpufreq");
-		if (ret)
+		if (ret) {
+			cpufreq_cpu_put(managed_policy);
 			goto err_out_unregister;
+		}
 	}

 	policy->governor = NULL; /* to assure that the starting sequence is
 				  * run in cpufreq_set_policy */
@@ -965,17 +977,20 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 		per_cpu(cpufreq_cpu_data, j) = NULL;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

+err_out_kobj_put:
 	kobject_put(&policy->kobj);
 	wait_for_completion(&policy->kobj_unregister);

-err_out_driver_exit:
+out_driver_exit:
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);

-err_out:
+err_unlock_policy:
 	unlock_policy_rwsem_write(cpu);
+err_free_cpumask:
+	free_cpumask_var(policy->cpus);
+err_free_policy:
 	kfree(policy);
 nomem_out:
 	module_put(cpufreq_driver->owner);
 module_out:
@@ -1070,8 +1085,6 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif

-	unlock_policy_rwsem_write(cpu);
-
 	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

@@ -1088,6 +1101,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(data);

+	unlock_policy_rwsem_write(cpu);
+
 	free_cpumask_var(data->related_cpus);
 	free_cpumask_var(data->cpus);
 	kfree(data);
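The cpufreq_add_dev() rework replaces ad-hoc inline cleanup with the usual layered-goto idiom: each acquisition gets a label that undoes exactly the steps taken before it, and error paths jump to the deepest applicable label. A condensed sketch of the resulting shape (only the allocation layers are shown; the function name is illustrative):

	static int add_dev_sketch(unsigned int cpu)	/* condensed illustration */
	{
		struct cpufreq_policy *policy;
		int ret;

		policy = kzalloc(sizeof(*policy), GFP_KERNEL);
		if (!policy)
			return -ENOMEM;
		if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
			ret = -ENOMEM;
			goto err_free_policy;
		}
		if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
			ret = -ENOMEM;
			goto err_free_cpumask;
		}
		/* ... lock the policy, driver->init(), sysfs setup; deeper
		 * failures jump to err_unlock_policy / out_driver_exit ... */
		return 0;

	err_free_cpumask:
		free_cpumask_var(policy->cpus);
	err_free_policy:
		kfree(policy);
		return ret;
	}

Each failure now releases exactly what was acquired, which the old per-branch kfree()/free_cpumask_var() calls did not guarantee.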
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -63,22 +63,20 @@ struct cpu_dbs_info_s {
 	unsigned int down_skip;
 	unsigned int requested_freq;
 	int cpu;
-	unsigned int enable:1;
+	/*
+	 * percpu mutex that serializes governor limit change with
+	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
+	 * when user is changing the governor or limits.
+	 */
+	struct mutex timer_mutex;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

 static unsigned int dbs_enable;	/* number of CPUs using this policy */

 /*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop.
  */
 static DEFINE_MUTEX(dbs_mutex);
@@ -143,9 +141,6 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	struct cpufreq_policy *policy;

-	if (!this_dbs_info->enable)
-		return 0;
-
 	policy = this_dbs_info->cur_policy;

 	/*
@@ -488,18 +483,12 @@ static void do_dbs_timer(struct work_struct *work)
 	delay -= jiffies % delay;

-	if (lock_policy_rwsem_write(cpu) < 0)
-		return;
-
-	if (!dbs_info->enable) {
-		unlock_policy_rwsem_write(cpu);
-		return;
-	}
+	mutex_lock(&dbs_info->timer_mutex);

 	dbs_check_cpu(dbs_info);

 	queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
-	unlock_policy_rwsem_write(cpu);
+	mutex_unlock(&dbs_info->timer_mutex);
 }

 static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -508,7 +497,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 	delay -= jiffies % delay;

-	dbs_info->enable = 1;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
 	queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
 				delay);
@@ -516,7 +504,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)

 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
-	dbs_info->enable = 0;
 	cancel_delayed_work_sync(&dbs_info->work);
 }

@@ -535,9 +522,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if ((!cpu_online(cpu)) || (!policy->cur))
 			return -EINVAL;

-		if (this_dbs_info->enable) /* Already enabled */
-			break;
-
 		mutex_lock(&dbs_mutex);

 		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -561,6 +545,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		this_dbs_info->down_skip = 0;
 		this_dbs_info->requested_freq = policy->cur;

+		mutex_init(&this_dbs_info->timer_mutex);
 		dbs_enable++;
 		/*
 		 * Start the timerschedule work, when this governor
@@ -590,17 +575,19 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					&dbs_cpufreq_notifier_block,
 					CPUFREQ_TRANSITION_NOTIFIER);
 		}
-		dbs_timer_init(this_dbs_info);
-
 		mutex_unlock(&dbs_mutex);

+		dbs_timer_init(this_dbs_info);
+
 		break;

 	case CPUFREQ_GOV_STOP:
-		mutex_lock(&dbs_mutex);
 		dbs_timer_exit(this_dbs_info);
+
+		mutex_lock(&dbs_mutex);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
+		mutex_destroy(&this_dbs_info->timer_mutex);

 		/*
 		 * Stop the timerschedule work, when this governor
@@ -616,7 +603,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;

 	case CPUFREQ_GOV_LIMITS:
-		mutex_lock(&dbs_mutex);
+		mutex_lock(&this_dbs_info->timer_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
@@ -625,7 +612,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
-		mutex_unlock(&dbs_mutex);
+		mutex_unlock(&this_dbs_info->timer_mutex);

 		break;
 	}
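The conservative hunks converge on a single locking scheme: a per-CPU timer_mutex serializes the sampling worker against limit changes, while the global dbs_mutex shrinks to guarding dbs_tuners_ins and dbs_enable. Condensed from the hunks above (fragments, not complete functions):

	/* sampling side, in do_dbs_timer() */
	mutex_lock(&dbs_info->timer_mutex);
	dbs_check_cpu(dbs_info);
	queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);

	/* limit-change side, in CPUFREQ_GOV_LIMITS */
	mutex_lock(&this_dbs_info->timer_mutex);
	/* clamp cur_policy->cur into [policy->min, policy->max] */
	mutex_unlock(&this_dbs_info->timer_mutex);

Because the worker never touches dbs_mutex or the policy rwsem, GOV_STOP may call dbs_timer_exit() (and hence cancel_delayed_work_sync()) before taking dbs_mutex, so governor teardown can no longer deadlock against a running timer.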
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -70,23 +70,21 @@ struct cpu_dbs_info_s {
 	unsigned int freq_lo_jiffies;
 	unsigned int freq_hi_jiffies;
 	int cpu;
-	unsigned int enable:1,
-		sample_type:1;
+	unsigned int sample_type:1;
+	/*
+	 * percpu mutex that serializes governor limit change with
+	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
+	 * when user is changing the governor or limits.
+	 */
+	struct mutex timer_mutex;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

 static unsigned int dbs_enable;	/* number of CPUs using this policy */

 /*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop.
  */
 static DEFINE_MUTEX(dbs_mutex);
@@ -192,13 +190,18 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
 	return freq_hi;
 }

+static void ondemand_powersave_bias_init_cpu(int cpu)
+{
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
+	dbs_info->freq_lo = 0;
+}
+
 static void ondemand_powersave_bias_init(void)
 {
 	int i;
 	for_each_online_cpu(i) {
-		struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
-		dbs_info->freq_table = cpufreq_frequency_get_table(i);
-		dbs_info->freq_lo = 0;
+		ondemand_powersave_bias_init_cpu(i);
 	}
 }
@@ -240,12 +243,10 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
+	if (ret != 1)
+		return -EINVAL;

 	mutex_lock(&dbs_mutex);
-	if (ret != 1) {
-		mutex_unlock(&dbs_mutex);
-		return -EINVAL;
-	}
 	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
 	mutex_unlock(&dbs_mutex);
@@ -259,13 +260,12 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf(buf, "%u", &input);

-	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
 			input < MIN_FREQUENCY_UP_THRESHOLD) {
-		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}

+	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.up_threshold = input;
 	mutex_unlock(&dbs_mutex);
@@ -363,9 +363,6 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	struct cpufreq_policy *policy;
 	unsigned int j;

-	if (!this_dbs_info->enable)
-		return;
-
 	this_dbs_info->freq_lo = 0;
 	policy = this_dbs_info->cur_policy;
@@ -493,14 +490,7 @@ static void do_dbs_timer(struct work_struct *work)
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 	delay -= jiffies % delay;

-	if (lock_policy_rwsem_write(cpu) < 0)
-		return;
-
-	if (!dbs_info->enable) {
-		unlock_policy_rwsem_write(cpu);
-		return;
-	}
-
+	mutex_lock(&dbs_info->timer_mutex);
 	/* Common NORMAL_SAMPLE setup */
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
@@ -517,7 +507,7 @@ static void do_dbs_timer(struct work_struct *work)
 					dbs_info->freq_lo, CPUFREQ_RELATION_H);
 	}
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
-	unlock_policy_rwsem_write(cpu);
+	mutex_unlock(&dbs_info->timer_mutex);
 }

 static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -526,8 +516,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 	delay -= jiffies % delay;

-	dbs_info->enable = 1;
-	ondemand_powersave_bias_init();
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
 	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
@@ -536,7 +524,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)

 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
-	dbs_info->enable = 0;
 	cancel_delayed_work_sync(&dbs_info->work);
 }

@@ -555,19 +542,15 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if ((!cpu_online(cpu)) || (!policy->cur))
 			return -EINVAL;

-		if (this_dbs_info->enable) /* Already enabled */
-			break;
-
 		mutex_lock(&dbs_mutex);
-		dbs_enable++;

 		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
 		if (rc) {
-			dbs_enable--;
 			mutex_unlock(&dbs_mutex);
 			return rc;
 		}

+		dbs_enable++;
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -581,6 +564,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			}
 		}
 		this_dbs_info->cpu = cpu;
+		ondemand_powersave_bias_init_cpu(cpu);
+		mutex_init(&this_dbs_info->timer_mutex);
 		/*
 		 * Start the timerschedule work, when this governor
 		 * is used for first time
@@ -598,29 +583,31 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					max(min_sampling_rate,
 					    latency * LATENCY_MULTIPLIER);
 		}
-		dbs_timer_init(this_dbs_info);
-
 		mutex_unlock(&dbs_mutex);

+		dbs_timer_init(this_dbs_info);
+
 		break;

 	case CPUFREQ_GOV_STOP:
-		mutex_lock(&dbs_mutex);
 		dbs_timer_exit(this_dbs_info);
+
+		mutex_lock(&dbs_mutex);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
+		mutex_destroy(&this_dbs_info->timer_mutex);
 		dbs_enable--;
 		mutex_unlock(&dbs_mutex);

 		break;

 	case CPUFREQ_GOV_LIMITS:
-		mutex_lock(&dbs_mutex);
+		mutex_lock(&this_dbs_info->timer_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
 						policy->max, CPUFREQ_RELATION_H);
 		else if (policy->min > this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
 						policy->min, CPUFREQ_RELATION_L);
-		mutex_unlock(&dbs_mutex);
+		mutex_unlock(&this_dbs_info->timer_mutex);

 		break;
 	}

 	return 0;
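The ondemand changes mirror the conservative ones. For reference, the interleaving that the old "DEADLOCK ALERT!" comment warned about, sketched as a hypothetical timeline rather than code from the series:

	/*
	 * If the sampling worker needed dbs_mutex, teardown could deadlock:
	 *
	 *   governor thread (GOV_STOP)         per-CPU worker (do_dbs_timer)
	 *   --------------------------         -----------------------------
	 *   mutex_lock(&dbs_mutex);
	 *                                      mutex_lock(&dbs_mutex);  <- blocks
	 *   cancel_delayed_work_sync(&work);   <- waits for the worker: deadlock
	 *
	 * Hence dbs_timer_exit() now runs before mutex_lock(&dbs_mutex) in
	 * GOV_STOP, and do_dbs_timer() only ever takes its own timer_mutex.
	 */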