Commit ef294986 authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq

* master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] Longhaul - Redo Longhaul ver. 2
  [CPUFREQ] EPS - Correct 2nd brand test
  [CPUFREQ] Longhaul - Separate frequency and voltage transition
  [CPUFREQ] Longhaul - Models of Nehemiah
  [CPUFREQ] Whitespace fixup
  [CPUFREQ] Longhaul - Simplier minmult
  [CPUFREQ] CPU_FREQ_TABLE shouldn't be a def_tristate
  [CPUFREQ] ondemand governor use new cpufreq rwsem locking in work callback
  [CPUFREQ] ondemand governor restructure the work callback
  [CPUFREQ] Rewrite lock in cpufreq to eliminate cpufreq/hotplug related issues
  [CPUFREQ] Remove hotplug cpu crap
  [CPUFREQ] Enhanced PowerSaver driver
  [CPUFREQ] Longhaul - Add VT8235 support
  [CPUFREQ] Longhaul - Fix guess_fsb function
  [CPUFREQ] Longhaul - Remove duplicate tables
  [CPUFREQ] Longhaul - Introduce Nehemiah C
  [CPUFREQ] fix cpuinfo_cur_freq for CPU_HW_PSTATE
  [CPUFREQ] Longhaul - Remove "ignore_latency" option
parents 0187f221 2b8c0e13
......@@ -217,6 +217,15 @@ config X86_LONGHAUL
If in doubt, say N.
config X86_E_POWERSAVER
tristate "VIA C7 Enhanced PowerSaver (EXPERIMENTAL)"
select CPU_FREQ_TABLE
depends on EXPERIMENTAL
help
This adds the CPUFreq driver for VIA C7 processors.
If in doubt, say N.
comment "shared options"
config X86_ACPI_CPUFREQ_PROC_INTF
......
......@@ -2,6 +2,7 @@ obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_LONGHAUL) += longhaul.o
obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o
obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o
obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o
obj-$(CONFIG_X86_LONGRUN) += longrun.o
......
/*
* Based on documentation provided by Dave Jones. Thanks!
*
* Licensed under the terms of the GNU GPL License version 2.
*
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <asm/msr.h>
#include <asm/tsc.h>
#include <asm/timex.h>
#include <asm/io.h>
#include <asm/delay.h>
#define EPS_BRAND_C7M 0
#define EPS_BRAND_C7 1
#define EPS_BRAND_EDEN 2
#define EPS_BRAND_C3 3
struct eps_cpu_data {
u32 fsb;
struct cpufreq_frequency_table freq_table[];
};
static struct eps_cpu_data *eps_cpu[NR_CPUS];
static unsigned int eps_get(unsigned int cpu)
{
struct eps_cpu_data *centaur;
u32 lo, hi;
if (cpu)
return 0;
centaur = eps_cpu[cpu];
if (centaur == NULL)
return 0;
/* Return current frequency */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
return centaur->fsb * ((lo >> 8) & 0xff);
}
static int eps_set_state(struct eps_cpu_data *centaur,
unsigned int cpu,
u32 dest_state)
{
struct cpufreq_freqs freqs;
u32 lo, hi;
int err = 0;
int i;
freqs.old = eps_get(cpu);
freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff);
freqs.cpu = cpu;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* Wait while CPU is busy */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i = 0;
while (lo & ((1 << 16) | (1 << 17))) {
udelay(16);
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i++;
if (unlikely(i > 64)) {
err = -ENODEV;
goto postchange;
}
}
/* Set new multiplier and voltage */
wrmsr(MSR_IA32_PERF_CTL, dest_state & 0xffff, 0);
/* Wait until transition end */
i = 0;
do {
udelay(16);
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
i++;
if (unlikely(i > 64)) {
err = -ENODEV;
goto postchange;
}
} while (lo & ((1 << 16) | (1 << 17)));
/* Return current frequency */
postchange:
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
freqs.new = centaur->fsb * ((lo >> 8) & 0xff);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return err;
}
static int eps_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct eps_cpu_data *centaur;
unsigned int newstate = 0;
unsigned int cpu = policy->cpu;
unsigned int dest_state;
int ret;
if (unlikely(eps_cpu[cpu] == NULL))
return -ENODEV;
centaur = eps_cpu[cpu];
if (unlikely(cpufreq_frequency_table_target(policy,
&eps_cpu[cpu]->freq_table[0],
target_freq,
relation,
&newstate))) {
return -EINVAL;
}
/* Make frequency transition */
dest_state = centaur->freq_table[newstate].index & 0xffff;
ret = eps_set_state(centaur, cpu, dest_state);
if (ret)
printk(KERN_ERR "eps: Timeout!\n");
return ret;
}
static int eps_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy,
&eps_cpu[policy->cpu]->freq_table[0]);
}
static int eps_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i;
u32 lo, hi;
u64 val;
u8 current_multiplier, current_voltage;
u8 max_multiplier, max_voltage;
u8 min_multiplier, min_voltage;
u8 brand;
u32 fsb;
struct eps_cpu_data *centaur;
struct cpufreq_frequency_table *f_table;
int k, step, voltage;
int ret;
int states;
if (policy->cpu != 0)
return -ENODEV;
/* Check brand */
printk("eps: Detected VIA ");
rdmsr(0x1153, lo, hi);
brand = (((lo >> 2) ^ lo) >> 18) & 3;
switch(brand) {
case EPS_BRAND_C7M:
printk("C7-M\n");
break;
case EPS_BRAND_C7:
printk("C7\n");
break;
case EPS_BRAND_EDEN:
printk("Eden\n");
break;
case EPS_BRAND_C3:
printk("C3\n");
return -ENODEV;
break;
}
/* Enable Enhanced PowerSaver */
rdmsrl(MSR_IA32_MISC_ENABLE, val);
if (!(val & 1 << 16)) {
val |= 1 << 16;
wrmsrl(MSR_IA32_MISC_ENABLE, val);
/* Can be locked at 0 */
rdmsrl(MSR_IA32_MISC_ENABLE, val);
if (!(val & 1 << 16)) {
printk("eps: Can't enable Enhanced PowerSaver\n");
return -ENODEV;
}
}
/* Print voltage and multiplier */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
current_voltage = lo & 0xff;
printk("eps: Current voltage = %dmV\n", current_voltage * 16 + 700);
current_multiplier = (lo >> 8) & 0xff;
printk("eps: Current multiplier = %d\n", current_multiplier);
/* Print limits */
max_voltage = hi & 0xff;
printk("eps: Highest voltage = %dmV\n", max_voltage * 16 + 700);
max_multiplier = (hi >> 8) & 0xff;
printk("eps: Highest multiplier = %d\n", max_multiplier);
min_voltage = (hi >> 16) & 0xff;
printk("eps: Lowest voltage = %dmV\n", min_voltage * 16 + 700);
min_multiplier = (hi >> 24) & 0xff;
printk("eps: Lowest multiplier = %d\n", min_multiplier);
/* Sanity checks */
if (current_multiplier == 0 || max_multiplier == 0
|| min_multiplier == 0)
return -EINVAL;
if (current_multiplier > max_multiplier
|| max_multiplier <= min_multiplier)
return -EINVAL;
if (current_voltage > 0x1c || max_voltage > 0x1c)
return -EINVAL;
if (max_voltage < min_voltage)
return -EINVAL;
/* Calc FSB speed */
fsb = cpu_khz / current_multiplier;
/* Calc number of p-states supported */
if (brand == EPS_BRAND_C7M)
states = max_multiplier - min_multiplier + 1;
else
states = 2;
/* Allocate private data and frequency table for current cpu */
centaur = kzalloc(sizeof(struct eps_cpu_data)
+ (states + 1) * sizeof(struct cpufreq_frequency_table),
GFP_KERNEL);
if (!centaur)
return -ENOMEM;
eps_cpu[0] = centaur;
/* Copy basic values */
centaur->fsb = fsb;
/* Fill frequency and MSR value table */
f_table = &centaur->freq_table[0];
if (brand != EPS_BRAND_C7M) {
f_table[0].frequency = fsb * min_multiplier;
f_table[0].index = (min_multiplier << 8) | min_voltage;
f_table[1].frequency = fsb * max_multiplier;
f_table[1].index = (max_multiplier << 8) | max_voltage;
f_table[2].frequency = CPUFREQ_TABLE_END;
} else {
k = 0;
step = ((max_voltage - min_voltage) * 256)
/ (max_multiplier - min_multiplier);
for (i = min_multiplier; i <= max_multiplier; i++) {
voltage = (k * step) / 256 + min_voltage;
f_table[k].frequency = fsb * i;
f_table[k].index = (i << 8) | voltage;
k++;
}
f_table[k].frequency = CPUFREQ_TABLE_END;
}
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
policy->cur = fsb * current_multiplier;
ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]);
if (ret) {
kfree(centaur);
return ret;
}
cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu);
return 0;
}
static int eps_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
struct eps_cpu_data *centaur;
u32 lo, hi;
if (eps_cpu[cpu] == NULL)
return -ENODEV;
centaur = eps_cpu[cpu];
/* Get max frequency */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
/* Set max frequency */
eps_set_state(centaur, cpu, hi & 0xffff);
/* Bye */
cpufreq_frequency_table_put_attr(policy->cpu);
kfree(eps_cpu[cpu]);
eps_cpu[cpu] = NULL;
return 0;
}
static struct freq_attr* eps_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver eps_driver = {
.verify = eps_verify,
.target = eps_target,
.init = eps_cpu_init,
.exit = eps_cpu_exit,
.get = eps_get,
.name = "e_powersaver",
.owner = THIS_MODULE,
.attr = eps_attr,
};
static int __init eps_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
/* This driver will work only on Centaur C7 processors with
* Enhanced SpeedStep/PowerSaver registers */
if (c->x86_vendor != X86_VENDOR_CENTAUR
|| c->x86 != 6 || c->x86_model != 10)
return -ENODEV;
if (!cpu_has(c, X86_FEATURE_EST))
return -ENODEV;
if (cpufreq_register_driver(&eps_driver))
return -EINVAL;
return 0;
}
static void __exit eps_exit(void)
{
cpufreq_unregister_driver(&eps_driver);
}
MODULE_AUTHOR("Rafa Bilski <rafalbilski@interia.pl>");
MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPU's.");
MODULE_LICENSE("GPL");
module_init(eps_init);
module_exit(eps_exit);
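Note on the p-state layout used throughout the new driver: eps_cpu_init packs each state into freq_table[].index exactly as eps_set_state later writes it to MSR_IA32_PERF_CTL, with the multiplier (FID) in bits 15:8 and the voltage code (VID) in bits 7:0, where the VID maps to millivolts as vid * 16 + 700. A minimal user-space sketch of that encoding, assuming a 100 MHz FSB purely for illustration (eps_encode_state is a hypothetical helper, not part of the driver):

#include <stdio.h>
#include <stdint.h>

/* Illustration only: mirrors the freq_table[].index layout used by
 * e_powersaver.c (multiplier in bits 15:8, voltage code in bits 7:0). */
static uint16_t eps_encode_state(unsigned int multiplier, unsigned int vid)
{
	return (uint16_t)((multiplier << 8) | (vid & 0xff));
}

int main(void)
{
	unsigned int fsb_khz = 100000;			/* assumed 100 MHz FSB */
	uint16_t state = eps_encode_state(16, 8);	/* 16.0x, VID 8 */
	unsigned int freq_khz = fsb_khz * ((state >> 8) & 0xff);
	unsigned int mv = (state & 0xff) * 16 + 700;

	printf("state 0x%04x -> %u kHz at %u mV\n",
	       (unsigned int)state, freq_khz, mv);
	/* prints: state 0x1008 -> 1600000 kHz at 828 mV */
	return 0;
}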
......@@ -235,84 +235,14 @@ static int __initdata ezrat_eblcr[32] = {
/*
* VIA C3 Nehemiah */
static int __initdata nehemiah_a_clock_ratio[32] = {
static int __initdata nehemiah_clock_ratio[32] = {
100, /* 0000 -> 10.0x */
160, /* 0001 -> 16.0x */
-1, /* 0010 -> RESERVED */
90, /* 0011 -> 9.0x */
95, /* 0100 -> 9.5x */
-1, /* 0101 -> RESERVED */
-1, /* 0110 -> RESERVED */
55, /* 0111 -> 5.5x */
60, /* 1000 -> 6.0x */
70, /* 1001 -> 7.0x */
80, /* 1010 -> 8.0x */
50, /* 1011 -> 5.0x */
65, /* 1100 -> 6.5x */
75, /* 1101 -> 7.5x */
85, /* 1110 -> 8.5x */
120, /* 1111 -> 12.0x */
100, /* 0000 -> 10.0x */
-1, /* 0001 -> RESERVED */
120, /* 0010 -> 12.0x */
90, /* 0011 -> 9.0x */
105, /* 0100 -> 10.5x */
115, /* 0101 -> 11.5x */
125, /* 0110 -> 12.5x */
135, /* 0111 -> 13.5x */
140, /* 1000 -> 14.0x */
150, /* 1001 -> 15.0x */
160, /* 1010 -> 16.0x */
130, /* 1011 -> 13.0x */
145, /* 1100 -> 14.5x */
155, /* 1101 -> 15.5x */
-1, /* 1110 -> RESERVED (13.0x) */
120, /* 1111 -> 12.0x */
};
static int __initdata nehemiah_b_clock_ratio[32] = {
100, /* 0000 -> 10.0x */
160, /* 0001 -> 16.0x */
-1, /* 0010 -> RESERVED */
90, /* 0011 -> 9.0x */
95, /* 0100 -> 9.5x */
-1, /* 0101 -> RESERVED */
-1, /* 0110 -> RESERVED */
55, /* 0111 -> 5.5x */
60, /* 1000 -> 6.0x */
70, /* 1001 -> 7.0x */
80, /* 1010 -> 8.0x */
50, /* 1011 -> 5.0x */
65, /* 1100 -> 6.5x */
75, /* 1101 -> 7.5x */
85, /* 1110 -> 8.5x */
120, /* 1111 -> 12.0x */
100, /* 0000 -> 10.0x */
110, /* 0001 -> 11.0x */
120, /* 0010 -> 12.0x */
90, /* 0011 -> 9.0x */
105, /* 0100 -> 10.5x */
115, /* 0101 -> 11.5x */
125, /* 0110 -> 12.5x */
135, /* 0111 -> 13.5x */
140, /* 1000 -> 14.0x */
150, /* 1001 -> 15.0x */
160, /* 1010 -> 16.0x */
130, /* 1011 -> 13.0x */
145, /* 1100 -> 14.5x */
155, /* 1101 -> 15.5x */
-1, /* 1110 -> RESERVED (13.0x) */
120, /* 1111 -> 12.0x */
};
static int __initdata nehemiah_c_clock_ratio[32] = {
100, /* 0000 -> 10.0x */
160, /* 0001 -> 16.0x */
40, /* 0010 -> RESERVED */
40, /* 0010 -> 4.0x */
90, /* 0011 -> 9.0x */
95, /* 0100 -> 9.5x */
-1, /* 0101 -> RESERVED */
45, /* 0110 -> RESERVED */
45, /* 0110 -> 4.5x */
55, /* 0111 -> 5.5x */
60, /* 1000 -> 6.0x */
70, /* 1001 -> 7.0x */
......@@ -340,84 +270,14 @@ static int __initdata nehemiah_c_clock_ratio[32] = {
120, /* 1111 -> 12.0x */
};
static int __initdata nehemiah_a_eblcr[32] = {
50, /* 0000 -> 5.0x */
160, /* 0001 -> 16.0x */
-1, /* 0010 -> RESERVED */
100, /* 0011 -> 10.0x */
55, /* 0100 -> 5.5x */
-1, /* 0101 -> RESERVED */
-1, /* 0110 -> RESERVED */
95, /* 0111 -> 9.5x */
90, /* 1000 -> 9.0x */
70, /* 1001 -> 7.0x */
80, /* 1010 -> 8.0x */
60, /* 1011 -> 6.0x */
120, /* 1100 -> 12.0x */
75, /* 1101 -> 7.5x */
85, /* 1110 -> 8.5x */
65, /* 1111 -> 6.5x */
90, /* 0000 -> 9.0x */
-1, /* 0001 -> RESERVED */
120, /* 0010 -> 12.0x */
100, /* 0011 -> 10.0x */
135, /* 0100 -> 13.5x */
115, /* 0101 -> 11.5x */
125, /* 0110 -> 12.5x */
105, /* 0111 -> 10.5x */
130, /* 1000 -> 13.0x */
150, /* 1001 -> 15.0x */
160, /* 1010 -> 16.0x */
140, /* 1011 -> 14.0x */
120, /* 1100 -> 12.0x */
155, /* 1101 -> 15.5x */
-1, /* 1110 -> RESERVED (13.0x) */
145 /* 1111 -> 14.5x */
/* end of table */
};
static int __initdata nehemiah_b_eblcr[32] = {
50, /* 0000 -> 5.0x */
160, /* 0001 -> 16.0x */
-1, /* 0010 -> RESERVED */
100, /* 0011 -> 10.0x */
55, /* 0100 -> 5.5x */
-1, /* 0101 -> RESERVED */
-1, /* 0110 -> RESERVED */
95, /* 0111 -> 9.5x */
90, /* 1000 -> 9.0x */
70, /* 1001 -> 7.0x */
80, /* 1010 -> 8.0x */
60, /* 1011 -> 6.0x */
120, /* 1100 -> 12.0x */
75, /* 1101 -> 7.5x */
85, /* 1110 -> 8.5x */
65, /* 1111 -> 6.5x */
90, /* 0000 -> 9.0x */
110, /* 0001 -> 11.0x */
120, /* 0010 -> 12.0x */
100, /* 0011 -> 10.0x */
135, /* 0100 -> 13.5x */
115, /* 0101 -> 11.5x */
125, /* 0110 -> 12.5x */
105, /* 0111 -> 10.5x */
130, /* 1000 -> 13.0x */
150, /* 1001 -> 15.0x */
160, /* 1010 -> 16.0x */
140, /* 1011 -> 14.0x */
120, /* 1100 -> 12.0x */
155, /* 1101 -> 15.5x */
-1, /* 1110 -> RESERVED (13.0x) */
145 /* 1111 -> 14.5x */
/* end of table */
};
static int __initdata nehemiah_c_eblcr[32] = {
static int __initdata nehemiah_eblcr[32] = {
50, /* 0000 -> 5.0x */
160, /* 0001 -> 16.0x */
40, /* 0010 -> RESERVED */
40, /* 0010 -> 4.0x */
100, /* 0011 -> 10.0x */
55, /* 0100 -> 5.5x */
-1, /* 0101 -> RESERVED */
45, /* 0110 -> RESERVED */
45, /* 0110 -> 4.5x */
95, /* 0111 -> 9.5x */
90, /* 1000 -> 9.0x */
70, /* 1001 -> 7.0x */
......@@ -443,7 +303,6 @@ static int __initdata nehemiah_c_eblcr[32] = {
155, /* 1101 -> 15.5x */
-1, /* 1110 -> RESERVED (13.0x) */
145 /* 1111 -> 14.5x */
/* end of table */
};
/*
......
......@@ -1289,8 +1289,12 @@ static unsigned int powernowk8_get (unsigned int cpu)
if (query_current_values_with_pending_wait(data))
goto out;
if (cpu_family == CPU_HW_PSTATE)
khz = find_khz_freq_from_fiddid(data->currfid, data->currdid);
else
khz = find_khz_freq_from_fid(data->currfid);
out:
set_cpus_allowed(current, oldmask);
return khz;
......
......@@ -16,7 +16,7 @@ config CPU_FREQ
if CPU_FREQ
config CPU_FREQ_TABLE
def_tristate m
tristate
config CPU_FREQ_DEBUG
bool "Enable CPUfreq debugging"
......
......@@ -429,14 +429,12 @@ static void dbs_check_cpu(int cpu)
static void do_dbs_timer(struct work_struct *work)
{
int i;
lock_cpu_hotplug();
mutex_lock(&dbs_mutex);
for_each_online_cpu(i)
dbs_check_cpu(i);
schedule_delayed_work(&dbs_work,
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
mutex_unlock(&dbs_mutex);
unlock_cpu_hotplug();
}
static inline void dbs_timer_init(void)
......
......@@ -52,19 +52,20 @@ static unsigned int def_sampling_rate;
static void do_dbs_timer(struct work_struct *work);
/* Sampling types */
enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
struct cpu_dbs_info_s {
cputime64_t prev_cpu_idle;
cputime64_t prev_cpu_wall;
struct cpufreq_policy *cur_policy;
struct delayed_work work;
enum dbs_sample sample_type;
unsigned int enable;
struct cpufreq_frequency_table *freq_table;
unsigned int freq_lo;
unsigned int freq_lo_jiffies;
unsigned int freq_hi_jiffies;
int cpu;
unsigned int enable:1,
sample_type:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
......@@ -402,7 +403,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
if (load < (dbs_tuners_ins.up_threshold - 10)) {
unsigned int freq_next, freq_cur;
freq_cur = cpufreq_driver_getavg(policy);
freq_cur = __cpufreq_driver_getavg(policy);
if (!freq_cur)
freq_cur = policy->cur;
......@@ -423,9 +424,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
static void do_dbs_timer(struct work_struct *work)
{
unsigned int cpu = smp_processor_id();
struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
enum dbs_sample sample_type = dbs_info->sample_type;
struct cpu_dbs_info_s *dbs_info =
container_of(work, struct cpu_dbs_info_s, work.work);
unsigned int cpu = dbs_info->cpu;
int sample_type = dbs_info->sample_type;
/* We want all CPUs to do sampling nearly on same jiffy */
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
......@@ -434,15 +437,19 @@ static void do_dbs_timer(struct work_struct *work)
delay -= jiffies % delay;
if (!dbs_info->enable)
if (lock_policy_rwsem_write(cpu) < 0)
return;
if (!dbs_info->enable) {
unlock_policy_rwsem_write(cpu);
return;
}
/* Common NORMAL_SAMPLE setup */
dbs_info->sample_type = DBS_NORMAL_SAMPLE;
if (!dbs_tuners_ins.powersave_bias ||
sample_type == DBS_NORMAL_SAMPLE) {
lock_cpu_hotplug();
dbs_check_cpu(dbs_info);
unlock_cpu_hotplug();
if (dbs_info->freq_lo) {
/* Setup timer for SUB_SAMPLE */
dbs_info->sample_type = DBS_SUB_SAMPLE;
......@@ -454,26 +461,27 @@ static void do_dbs_timer(struct work_struct *work)
CPUFREQ_RELATION_H);
}
queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
unlock_policy_rwsem_write(cpu);
}
static inline void dbs_timer_init(unsigned int cpu)
static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
/* We want all CPUs to do sampling nearly on same jiffy */
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
delay -= jiffies % delay;
dbs_info->enable = 1;
ondemand_powersave_bias_init();
INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
dbs_info->sample_type = DBS_NORMAL_SAMPLE;
queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
delay);
}
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
dbs_info->enable = 0;
cancel_delayed_work(&dbs_info->work);
flush_workqueue(kondemand_wq);
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
......@@ -502,21 +510,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_mutex);
dbs_enable++;
if (dbs_enable == 1) {
kondemand_wq = create_workqueue("kondemand");
if (!kondemand_wq) {
printk(KERN_ERR
"Creation of kondemand failed\n");
dbs_enable--;
mutex_unlock(&dbs_mutex);
return -ENOSPC;
}
}
rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
if (rc) {
if (dbs_enable == 1)
destroy_workqueue(kondemand_wq);
dbs_enable--;
mutex_unlock(&dbs_mutex);
return rc;
......@@ -530,7 +526,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
j_dbs_info->prev_cpu_wall = get_jiffies_64();
}
this_dbs_info->enable = 1;
this_dbs_info->cpu = cpu;
/*
* Start the timerschedule work, when this governor
* is used for first time
......@@ -550,7 +546,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_tuners_ins.sampling_rate = def_sampling_rate;
}
dbs_timer_init(policy->cpu);
dbs_timer_init(this_dbs_info);
mutex_unlock(&dbs_mutex);
break;
......@@ -560,9 +556,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_timer_exit(this_dbs_info);
sysfs_remove_group(&policy->kobj, &dbs_attr_group);
dbs_enable--;
if (dbs_enable == 0)
destroy_workqueue(kondemand_wq);
mutex_unlock(&dbs_mutex);
break;
......@@ -591,12 +584,18 @@ static struct cpufreq_governor cpufreq_gov_dbs = {
static int __init cpufreq_gov_dbs_init(void)
{
kondemand_wq = create_workqueue("kondemand");
if (!kondemand_wq) {
printk(KERN_ERR "Creation of kondemand failed\n");
return -EFAULT;
}
return cpufreq_register_governor(&cpufreq_gov_dbs);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_dbs);
destroy_workqueue(kondemand_wq);
}
......@@ -608,3 +607,4 @@ MODULE_LICENSE("GPL");
module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);
......@@ -370,12 +370,10 @@ __exit cpufreq_stats_exit(void)
cpufreq_unregister_notifier(&notifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER);
unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
lock_cpu_hotplug();
for_each_online_cpu(cpu) {
cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
CPU_DEAD, (void *)(long)cpu);
}
unlock_cpu_hotplug();
}
MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>");
......
......@@ -71,7 +71,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
lock_cpu_hotplug();
mutex_lock(&userspace_mutex);
if (!cpu_is_managed[policy->cpu])
goto err;
......@@ -94,7 +93,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
err:
mutex_unlock(&userspace_mutex);
unlock_cpu_hotplug();
return ret;
}
......
......@@ -84,9 +84,6 @@ struct cpufreq_policy {
unsigned int policy; /* see above */
struct cpufreq_governor *governor; /* see below */
struct mutex lock; /* CPU ->setpolicy or ->target may
only be called once a time */
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
......@@ -172,11 +169,16 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int relation);
extern int cpufreq_driver_getavg(struct cpufreq_policy *policy);
extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
int lock_policy_rwsem_read(int cpu);
int lock_policy_rwsem_write(int cpu);
void unlock_policy_rwsem_read(int cpu);
void unlock_policy_rwsem_write(int cpu);
/*********************************************************************
* CPUFREQ DRIVER INTERFACE *
......
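The per-CPU rwsem helpers exported above are what the reworked ondemand work callback uses in place of lock_cpu_hotplug(): take the write lock for the callback's own CPU, bail out if the governor was disabled in the meantime, sample, re-queue the work, then drop the lock. A rough sketch of that pattern, assuming the declarations above plus the ondemand symbols from this diff (dbs_check_cpu, kondemand_wq, dbs_tuners_ins); this is an outline of the locking flow, not a drop-in replacement for the real do_dbs_timer:

/* Sketch of the governor work-callback locking introduced in this merge. */
static void example_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (lock_policy_rwsem_write(cpu) < 0)	/* policy is going away */
		return;

	if (!dbs_info->enable) {		/* governor stopped meanwhile */
		unlock_policy_rwsem_write(cpu);
		return;
	}

	dbs_check_cpu(dbs_info);		/* sampling runs under the lock */
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
	unlock_policy_rwsem_write(cpu);
}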