Commit 7f6c69dc authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] Make acpi-cpufreq more robust against BIOS freq changes behind our back.
  [CPUFREQ] change cpu freq tables to per_cpu variables
  [CPUFREQ] fix show_trans_table
  [CPUFREQ] Warn when cpufreq_register_notifier called before pure initcalls
  [CPUFREQ] Refactor locking in cpufreq_add_dev
  [CPUFREQ] more CodingStyle
  [CPUFREQ] CodingStyle
  [CPUFREQ] Slightly shorten the error paths of cpufreq_suspend/cpufreq_resume
parents 1e5ad9a3 e56a727b
@@ -339,6 +339,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
unsigned int freq;
+unsigned int cached_freq;
dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
@@ -347,7 +348,16 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
return 0;
}
+cached_freq = data->freq_table[data->acpi_data->state].frequency;
freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
+if (freq != cached_freq) {
+/*
+ * The dreaded BIOS frequency change behind our back.
+ * Force set the frequency on next target call.
+ */
+data->resume = 1;
+}
dprintk("cur freq = %u\n", freq);
return freq;
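A minimal sketch of the check the hunk above introduces, reduced to plain C with illustrative names (struct freq_state and check_for_bios_change are not the driver's real types): remember the frequency the driver last programmed, and if the hardware reports something else, flag a forced re-program on the next target call.

/* Sketch only: illustrative names, not the acpi-cpufreq structures. */
struct freq_state {
        unsigned int cached_khz;        /* frequency taken from our own freq table */
        int resume;                     /* force a set on the next ->target() call */
};

static unsigned int check_for_bios_change(struct freq_state *s, unsigned int hw_khz)
{
        if (hw_khz != s->cached_khz) {
                /* The BIOS changed the frequency behind our back. */
                s->resume = 1;
        }
        return hw_khz;                  /* still report what the hardware says */
}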
......
@@ -97,7 +97,7 @@ static int acpi_processor_apply_limit(struct acpi_processor *pr)
#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3
-static unsigned int cpufreq_thermal_reduction_pctg[NR_CPUS];
+static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
static unsigned int acpi_thermal_cpufreq_is_init = 0;
static int cpu_has_cpufreq(unsigned int cpu)
@@ -113,9 +113,9 @@ static int acpi_thermal_cpufreq_increase(unsigned int cpu)
if (!cpu_has_cpufreq(cpu))
return -ENODEV;
-if (cpufreq_thermal_reduction_pctg[cpu] <
+if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) <
CPUFREQ_THERMAL_MAX_STEP) {
-cpufreq_thermal_reduction_pctg[cpu]++;
+per_cpu(cpufreq_thermal_reduction_pctg, cpu)++;
cpufreq_update_policy(cpu);
return 0;
}
@@ -128,14 +128,14 @@ static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
if (!cpu_has_cpufreq(cpu))
return -ENODEV;
-if (cpufreq_thermal_reduction_pctg[cpu] >
+if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) >
(CPUFREQ_THERMAL_MIN_STEP + 1))
-cpufreq_thermal_reduction_pctg[cpu]--;
+per_cpu(cpufreq_thermal_reduction_pctg, cpu)--;
else
-cpufreq_thermal_reduction_pctg[cpu] = 0;
+per_cpu(cpufreq_thermal_reduction_pctg, cpu) = 0;
cpufreq_update_policy(cpu);
/* We reached max freq again and can leave passive mode */
-return !cpufreq_thermal_reduction_pctg[cpu];
+return !per_cpu(cpufreq_thermal_reduction_pctg, cpu);
}
static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
@@ -147,9 +147,10 @@ static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
if (event != CPUFREQ_ADJUST)
goto out;
-max_freq =
-(policy->cpuinfo.max_freq *
-(100 - cpufreq_thermal_reduction_pctg[policy->cpu] * 20)) / 100;
+max_freq = (
+policy->cpuinfo.max_freq *
+(100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
+) / 100;
cpufreq_verify_within_limits(policy, 0, max_freq);
@@ -174,7 +175,7 @@ static int cpufreq_get_cur_state(unsigned int cpu)
if (!cpu_has_cpufreq(cpu))
return 0;
-return cpufreq_thermal_reduction_pctg[cpu];
+return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
}
static int cpufreq_set_cur_state(unsigned int cpu, int state)
@@ -182,7 +183,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
if (!cpu_has_cpufreq(cpu))
return 0;
-cpufreq_thermal_reduction_pctg[cpu] = state;
+per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
cpufreq_update_policy(cpu);
return 0;
}
@@ -191,8 +192,9 @@ void acpi_thermal_cpufreq_init(void)
{
int i;
-for (i = 0; i < NR_CPUS; i++)
-cpufreq_thermal_reduction_pctg[i] = 0;
+for (i = 0; i < nr_cpu_ids; i++)
+if (cpu_present(i))
+per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;
i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
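The thermal hunks above replace a static NR_CPUS-sized array with a per-CPU variable accessed through per_cpu(). A short sketch of that conversion pattern in isolation (kernel context assumed; reduction_pctg and bump_reduction are illustrative names, not the driver's):

#include <linux/percpu.h>

/* Before: static unsigned int reduction_pctg[NR_CPUS], indexed as [cpu]. */
/* After: one copy per possible CPU, no NR_CPUS-sized static array. */
static DEFINE_PER_CPU(unsigned int, reduction_pctg);

static void bump_reduction(unsigned int cpu)
{
        /* per_cpu(var, cpu) yields an lvalue for that CPU's copy. */
        per_cpu(reduction_pctg, cpu)++;
}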
......
@@ -118,9 +118,11 @@ static void handle_update(struct work_struct *work);
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;
+static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
srcu_init_notifier_head(&cpufreq_transition_notifier_list);
+init_cpufreq_transition_notifier_list_called = true;
return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
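The new init_cpufreq_transition_notifier_list_called flag is an ordering assertion: the SRCU notifier head only becomes usable once the pure initcall has run, so cpufreq_register_notifier() can warn about callers that register earlier. The same guard pattern, sketched with hypothetical names (kernel context assumed):

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/types.h>

static bool my_list_initialized;

static int __init my_list_init(void)
{
        /* e.g. srcu_init_notifier_head(&my_notifier_list); */
        my_list_initialized = true;
        return 0;
}
pure_initcall(my_list_init);    /* pure initcalls run before every other initcall level */

int my_register_notifier(void)
{
        /* Loudly flag callers that run before the pure initcall. */
        WARN_ON(!my_list_initialized);
        return 0;
}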
@@ -216,7 +218,7 @@ static void cpufreq_debug_disable_ratelimit(void)
}
void cpufreq_debug_printk(unsigned int type, const char *prefix,
-const char *fmt, ...)
+const char *fmt, ...)
{
char s[256];
va_list args;
@@ -378,7 +380,7 @@ static struct cpufreq_governor *__find_governor(const char *str_governor)
/**
* cpufreq_parse_governor - parse a governor string
*/
-static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
+static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
struct cpufreq_governor **governor)
{
int err = -EINVAL;
@@ -446,7 +448,7 @@ extern struct sysdev_class cpu_sysdev_class;
#define show_one(file_name, object) \
static ssize_t show_##file_name \
-(struct cpufreq_policy * policy, char *buf) \
+(struct cpufreq_policy *policy, char *buf) \
{ \
return sprintf (buf, "%u\n", policy->object); \
}
@@ -465,7 +467,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
*/
#define store_one(file_name, object) \
static ssize_t store_##file_name \
-(struct cpufreq_policy * policy, const char *buf, size_t count) \
+(struct cpufreq_policy *policy, const char *buf, size_t count) \
{ \
unsigned int ret = -EINVAL; \
struct cpufreq_policy new_policy; \
@@ -490,8 +492,8 @@ store_one(scaling_max_freq,max);
/**
* show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
*/
-static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy,
-char *buf)
+static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
+char *buf)
{
unsigned int cur_freq = __cpufreq_get(policy->cpu);
if (!cur_freq)
@@ -503,8 +505,7 @@ static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy,
/**
* show_scaling_governor - show the current policy for the specified CPU
*/
-static ssize_t show_scaling_governor (struct cpufreq_policy * policy,
-char *buf)
+static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
return sprintf(buf, "powersave\n");
@@ -519,8 +520,8 @@ static ssize_t show_scaling_governor (struct cpufreq_policy * policy,
/**
* store_scaling_governor - store policy for the specified CPU
*/
-static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
-const char *buf, size_t count)
+static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
+const char *buf, size_t count)
{
unsigned int ret = -EINVAL;
char str_governor[16];
@@ -554,7 +555,7 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
/**
* show_scaling_driver - show the cpufreq driver currently loaded
*/
-static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
+static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}
@@ -562,8 +563,8 @@ static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
/**
* show_scaling_available_governors - show the available CPUfreq governors
*/
-static ssize_t show_scaling_available_governors (struct cpufreq_policy *policy,
-char *buf)
+static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
+char *buf)
{
ssize_t i = 0;
struct cpufreq_governor *t;
@@ -585,7 +586,7 @@ static ssize_t show_scaling_available_governors (struct cpufreq_policy *policy,
/**
* show_affected_cpus - show the CPUs affected by each transition
*/
-static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf)
+static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
ssize_t i = 0;
unsigned int cpu;
@@ -602,7 +603,7 @@ static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf)
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
-const char *buf, size_t count)
+const char *buf, size_t count)
{
unsigned int freq = 0;
unsigned int ret;
@@ -651,7 +652,7 @@ define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);
define_one_rw(scaling_setspeed);
-static struct attribute * default_attrs[] = {
+static struct attribute *default_attrs[] = {
&cpuinfo_min_freq.attr,
&cpuinfo_max_freq.attr,
&scaling_min_freq.attr,
@@ -667,10 +668,10 @@ static struct attribute * default_attrs[] = {
#define to_policy(k) container_of(k,struct cpufreq_policy,kobj)
#define to_attr(a) container_of(a,struct freq_attr,attr)
-static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
+static ssize_t show(struct kobject *kobj, struct attribute *attr ,char *buf)
{
-struct cpufreq_policy * policy = to_policy(kobj);
-struct freq_attr * fattr = to_attr(attr);
+struct cpufreq_policy *policy = to_policy(kobj);
+struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
policy = cpufreq_cpu_get(policy->cpu);
if (!policy)
@@ -691,11 +692,11 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
return ret;
}
-static ssize_t store(struct kobject * kobj, struct attribute * attr,
-const char * buf, size_t count)
+static ssize_t store(struct kobject *kobj, struct attribute *attr,
+const char *buf, size_t count)
{
-struct cpufreq_policy * policy = to_policy(kobj);
-struct freq_attr * fattr = to_attr(attr);
+struct cpufreq_policy *policy = to_policy(kobj);
+struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
policy = cpufreq_cpu_get(policy->cpu);
if (!policy)
@@ -716,9 +717,9 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr,
return ret;
}
-static void cpufreq_sysfs_release(struct kobject * kobj)
+static void cpufreq_sysfs_release(struct kobject *kobj)
{
-struct cpufreq_policy * policy = to_policy(kobj);
+struct cpufreq_policy *policy = to_policy(kobj);
dprintk("last reference is dropped\n");
complete(&policy->kobj_unregister);
}
@@ -740,7 +741,7 @@ static struct kobj_type ktype_cpufreq = {
*
* Adds the cpufreq interface for a CPU device.
*/
-static int cpufreq_add_dev (struct sys_device * sys_dev)
+static int cpufreq_add_dev(struct sys_device *sys_dev)
{
unsigned int cpu = sys_dev->id;
int ret = 0;
@@ -800,7 +801,6 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
ret = cpufreq_driver->init(policy);
if (ret) {
dprintk("initialization failed\n");
-unlock_policy_rwsem_write(cpu);
goto err_out;
}
policy->user_policy.min = policy->cpuinfo.min_freq;
@@ -823,7 +823,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
/* check for existing affected CPUs. They may not be aware
* of it due to CPU Hotplug.
*/
-managed_policy = cpufreq_cpu_get(j);
+managed_policy = cpufreq_cpu_get(j); // FIXME: Where is this released? What about error paths?
if (unlikely(managed_policy)) {
/* Set proper policy_cpu */
@@ -842,14 +842,11 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
ret = sysfs_create_link(&sys_dev->kobj,
&managed_policy->kobj,
"cpufreq");
-if (ret) {
-unlock_policy_rwsem_write(cpu);
+if (ret)
goto err_out_driver_exit;
-}
cpufreq_debug_enable_ratelimit();
ret = 0;
-unlock_policy_rwsem_write(cpu);
goto err_out_driver_exit; /* call driver->exit() */
}
}
@@ -859,33 +856,26 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
/* prepare interface data */
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
"cpufreq");
-if (ret) {
-unlock_policy_rwsem_write(cpu);
+if (ret)
goto err_out_driver_exit;
-}
/* set up files for this cpu device */
drv_attr = cpufreq_driver->attr;
while ((drv_attr) && (*drv_attr)) {
ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
-if (ret) {
-unlock_policy_rwsem_write(cpu);
+if (ret)
goto err_out_driver_exit;
-}
drv_attr++;
}
-if (cpufreq_driver->get){
+if (cpufreq_driver->get) {
ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
-if (ret) {
-unlock_policy_rwsem_write(cpu);
+if (ret)
goto err_out_driver_exit;
-}
}
-if (cpufreq_driver->target){
+if (cpufreq_driver->target) {
ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
-if (ret) {
-unlock_policy_rwsem_write(cpu);
+if (ret)
goto err_out_driver_exit;
-}
}
spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -907,10 +897,8 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
cpu_sys_dev = get_cpu_sysdev(j);
ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
"cpufreq");
-if (ret) {
-unlock_policy_rwsem_write(cpu);
+if (ret)
goto err_out_unregister;
-}
}
policy->governor = NULL; /* to assure that the starting sequence is
@@ -950,6 +938,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
cpufreq_driver->exit(policy);
err_out:
+unlock_policy_rwsem_write(cpu);
kfree(policy);
nomem_out:
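The cpufreq_add_dev() hunks above drop the per-branch unlock_policy_rwsem_write() calls: the rwsem is taken once and every failure now jumps to err_out, which releases it in a single place. The shape of that refactor, sketched with placeholder helpers (lock_write, step_one and friends are assumptions, not the real cpufreq API):

/* Trivial stand-ins so the sketch is self-contained; the real code uses
 * the policy rwsem helpers and the driver/sysfs setup steps. */
static void lock_write(unsigned int cpu) { (void)cpu; }
static void unlock_write(unsigned int cpu) { (void)cpu; }
static int step_one(unsigned int cpu) { (void)cpu; return 0; }
static int step_two(unsigned int cpu) { (void)cpu; return 0; }

static int add_device(unsigned int cpu)
{
        int ret;

        lock_write(cpu);                /* taken exactly once */

        ret = step_one(cpu);
        if (ret)
                goto err_out;           /* no per-branch unlock any more */

        ret = step_two(cpu);
        if (ret)
                goto err_out;

        unlock_write(cpu);
        return 0;

err_out:
        unlock_write(cpu);              /* the one place that drops the lock on failure */
        return ret;
}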
@@ -967,7 +956,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
* Caller should already have policy_rwsem in write mode for this CPU.
* This routine frees the rwsem before returning.
*/
-static int __cpufreq_remove_dev (struct sys_device * sys_dev)
+static int __cpufreq_remove_dev(struct sys_device *sys_dev)
{
unsigned int cpu = sys_dev->id;
unsigned long flags;
@@ -1071,7 +1060,7 @@ static int __cpufreq_remove_dev (struct sys_device * sys_dev)
}
-static int cpufreq_remove_dev (struct sys_device * sys_dev)
+static int cpufreq_remove_dev(struct sys_device *sys_dev)
{
unsigned int cpu = sys_dev->id;
int retval;
@@ -1138,7 +1127,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
cpufreq_cpu_put(policy);
}
-return (ret_freq);
+return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
@@ -1149,7 +1138,7 @@ static unsigned int __cpufreq_get(unsigned int cpu)
unsigned int ret_freq = 0;
if (!cpufreq_driver->get)
-return (ret_freq);
+return ret_freq;
ret_freq = cpufreq_driver->get(cpu);
@@ -1163,7 +1152,7 @@ static unsigned int __cpufreq_get(unsigned int cpu)
}
}
-return (ret_freq);
+return ret_freq;
}
/**
@@ -1190,7 +1179,7 @@ unsigned int cpufreq_get(unsigned int cpu)
out_policy:
cpufreq_cpu_put(policy);
out:
-return (ret_freq);
+return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
@@ -1199,7 +1188,7 @@ EXPORT_SYMBOL(cpufreq_get);
* cpufreq_suspend - let the low level driver prepare for suspend
*/
-static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
+static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
{
int cpu = sysdev->id;
int ret = 0;
@@ -1221,22 +1210,18 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
return -EINVAL;
/* only handle each CPU group once */
-if (unlikely(cpu_policy->cpu != cpu)) {
-cpufreq_cpu_put(cpu_policy);
-return 0;
-}
+if (unlikely(cpu_policy->cpu != cpu))
+goto out;
if (cpufreq_driver->suspend) {
ret = cpufreq_driver->suspend(cpu_policy, pmsg);
if (ret) {
printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
"step on CPU %u\n", cpu_policy->cpu);
-cpufreq_cpu_put(cpu_policy);
-return ret;
+goto out;
}
}
if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
goto out;
@@ -1270,7 +1255,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
out:
cpufreq_cpu_put(cpu_policy);
-return 0;
+return ret;
}
/**
@@ -1281,7 +1266,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
* 3.) schedule call cpufreq_update_policy() ASAP as interrupts are
* restored.
*/
-static int cpufreq_resume(struct sys_device * sysdev)
+static int cpufreq_resume(struct sys_device *sysdev)
{
int cpu = sysdev->id;
int ret = 0;
@@ -1302,18 +1287,15 @@ static int cpufreq_resume(struct sys_device * sysdev)
return -EINVAL;
/* only handle each CPU group once */
-if (unlikely(cpu_policy->cpu != cpu)) {
-cpufreq_cpu_put(cpu_policy);
-return 0;
-}
+if (unlikely(cpu_policy->cpu != cpu))
+goto fail;
if (cpufreq_driver->resume) {
ret = cpufreq_driver->resume(cpu_policy);
if (ret) {
printk(KERN_ERR "cpufreq: resume failed in ->resume "
"step on CPU %u\n", cpu_policy->cpu);
-cpufreq_cpu_put(cpu_policy);
-return ret;
+goto fail;
}
}
@@ -1353,6 +1335,7 @@ static int cpufreq_resume(struct sys_device * sysdev)
out:
schedule_work(&cpu_policy->update);
+fail:
cpufreq_cpu_put(cpu_policy);
return ret;
}
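cpufreq_suspend() and cpufreq_resume() now funnel their early exits through a single label (out respectively fail) so the policy reference taken at the top is dropped in exactly one place. The pattern, sketched with placeholder names (the struct policy stubs and helpers here are assumptions for the example, not cpufreq's real API):

#include <errno.h>

/* Trivial stand-ins so the sketch is self-contained. */
struct policy { int cpu; };
static struct policy *policy_get(int cpu) { static struct policy p; p.cpu = cpu; return &p; }
static void policy_put(struct policy *p) { (void)p; }
static int policy_is_secondary(struct policy *p) { return p->cpu != 0; }
static int driver_resume(struct policy *p) { (void)p; return 0; }

static int resume_one(int cpu)
{
        int ret = 0;
        struct policy *p = policy_get(cpu);

        if (!p)
                return -EINVAL;

        if (policy_is_secondary(p))     /* "only handle each CPU group once" */
                goto fail;

        ret = driver_resume(p);
        if (ret)
                goto fail;

        /* ... further work would go here ... */

fail:
        policy_put(p);                  /* every exit path drops the reference once */
        return ret;
}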
@@ -1386,6 +1369,8 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
int ret;
+WARN_ON(!init_cpufreq_transition_notifier_list_called);
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
ret = srcu_notifier_chain_register(
@@ -1848,7 +1833,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_debug_enable_ratelimit();
}
-return (ret);
+return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
......
@@ -114,7 +114,7 @@ show_trans_table(struct cpufreq_policy *policy, char *buf)
stat->freq_table[i]);
}
if (len >= PAGE_SIZE)
-return len;
+return PAGE_SIZE;
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -131,8 +131,12 @@ show_trans_table(struct cpufreq_policy *policy, char *buf)
len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
stat->trans_table[i*stat->max_state+j]);
}
+if (len >= PAGE_SIZE)
+break;
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
}
+if (len >= PAGE_SIZE)
+return PAGE_SIZE;
return len;
}
CPUFREQ_STATDEVICE_ATTR(trans_table,0444,show_trans_table);
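The stats fix makes show_trans_table() stop appending once the one-page sysfs buffer is full and clamp the returned length to PAGE_SIZE (snprintf() reports the size it would have written, so len can grow past the buffer). A reduced plain-C sketch of that pattern (emit_rows and the 4096-byte PAGE_SIZE are assumptions for the example):

#include <stdio.h>

#define PAGE_SIZE 4096          /* sysfs show() buffers are one page */

/* buf must be at least PAGE_SIZE bytes. */
static long emit_rows(char *buf, int rows)
{
        long len = 0;
        int i;

        for (i = 0; i < rows; i++) {
                if (len >= PAGE_SIZE)
                        break;  /* buffer full: stop writing */
                len += snprintf(buf + len, PAGE_SIZE - len, "row %d\n", i);
        }

        /* snprintf() returns the untruncated size, so clamp before returning. */
        if (len >= PAGE_SIZE)
                return PAGE_SIZE;
        return len;
}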
......