Commit 0bb065f2 authored Mar 27, 2006 by Dave Jones
Merge git://git.kernel.org/pub/scm/linux/kernel/git/brodo/cpufreq-2.6

parents 329b10bb 7c9d8c0e
Showing 2 changed files with 71 additions and 91 deletions (+71 -91):

    drivers/cpufreq/cpufreq_conservative.c   +63 -88
    drivers/cpufreq/cpufreq_ondemand.c        +8  -3
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -35,12 +35,7 @@
  */

 #define DEF_FREQUENCY_UP_THRESHOLD        (80)
-#define MIN_FREQUENCY_UP_THRESHOLD        (0)
-#define MAX_FREQUENCY_UP_THRESHOLD        (100)
-
 #define DEF_FREQUENCY_DOWN_THRESHOLD      (20)
-#define MIN_FREQUENCY_DOWN_THRESHOLD      (0)
-#define MAX_FREQUENCY_DOWN_THRESHOLD      (100)

 /*
  * The polling frequency of this governor depends on the capability of
@@ -53,10 +48,14 @@
  * All times here are in uS.
  */
 static unsigned int def_sampling_rate;
-#define MIN_SAMPLING_RATE                 (def_sampling_rate / 2)
+#define MIN_SAMPLING_RATE_RATIO           (2)
+/* for correct statistics, we need at least 10 ticks between each measure */
+#define MIN_STAT_SAMPLING_RATE            (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
+#define MIN_SAMPLING_RATE                 (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
 #define MAX_SAMPLING_RATE                 (500 * def_sampling_rate)
-#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (100000)
-#define DEF_SAMPLING_DOWN_FACTOR          (5)
+#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
+#define DEF_SAMPLING_DOWN_FACTOR          (1)
 #define MAX_SAMPLING_DOWN_FACTOR          (10)
 #define TRANSITION_LATENCY_LIMIT          (10 * 1000)

 static void do_dbs_timer(void *data);
@@ -66,6 +65,8 @@ struct cpu_dbs_info_s {
     unsigned int prev_cpu_idle_up;
     unsigned int prev_cpu_idle_down;
     unsigned int enable;
+    unsigned int down_skip;
+    unsigned int requested_freq;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
@@ -136,7 +137,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
     unsigned int input;
     int ret;
     ret = sscanf(buf, "%u", &input);
-    if (ret != 1)
+    if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
         return -EINVAL;

     mutex_lock(&dbs_mutex);
@@ -173,8 +174,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
     ret = sscanf(buf, "%u", &input);

     mutex_lock(&dbs_mutex);
-    if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
-            input < MIN_FREQUENCY_UP_THRESHOLD ||
+    if (ret != 1 || input > 100 || input < 0 ||
             input <= dbs_tuners_ins.down_threshold) {
         mutex_unlock(&dbs_mutex);
         return -EINVAL;
@@ -194,8 +194,7 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
     ret = sscanf(buf, "%u", &input);

     mutex_lock(&dbs_mutex);
-    if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
-            input < MIN_FREQUENCY_DOWN_THRESHOLD ||
+    if (ret != 1 || input > 100 || input < 0 ||
             input >= dbs_tuners_ins.up_threshold) {
         mutex_unlock(&dbs_mutex);
         return -EINVAL;
@@ -297,31 +296,17 @@ static struct attribute_group dbs_attr_group = {
 static void dbs_check_cpu(int cpu)
 {
     unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
+    unsigned int tmp_idle_ticks, total_idle_ticks;
     unsigned int freq_step;
     unsigned int freq_down_sampling_rate;
-    static int down_skip[NR_CPUS];
-    static int requested_freq[NR_CPUS];
-    static unsigned short init_flag = 0;
-    struct cpu_dbs_info_s *this_dbs_info;
-    struct cpu_dbs_info_s *dbs_info;
+    struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
     struct cpufreq_policy *policy;
-    unsigned int j;

-    this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
     if (!this_dbs_info->enable)
         return;

     policy = this_dbs_info->cur_policy;

-    if (init_flag == 0) {
-        for_each_online_cpu(j) {
-            dbs_info = &per_cpu(cpu_dbs_info, j);
-            requested_freq[j] = dbs_info->cur_policy->cur;
-        }
-        init_flag = 1;
-    }
-
     /*
      * The default safe range is 20% to 80%
      * Every sampling_rate, we check
@@ -337,39 +322,29 @@ static void dbs_check_cpu(int cpu)
      */

     /* Check for frequency increase */
     idle_ticks = UINT_MAX;
-    for_each_cpu_mask(j, policy->cpus) {
-        unsigned int tmp_idle_ticks, total_idle_ticks;
-        struct cpu_dbs_info_s *j_dbs_info;

-        j_dbs_info = &per_cpu(cpu_dbs_info, j);
-        /* Check for frequency increase */
-        total_idle_ticks = get_cpu_idle_time(j);
-        tmp_idle_ticks = total_idle_ticks -
-            j_dbs_info->prev_cpu_idle_up;
-        j_dbs_info->prev_cpu_idle_up = total_idle_ticks;

-        if (tmp_idle_ticks < idle_ticks)
-            idle_ticks = tmp_idle_ticks;
-    }
+    /* Check for frequency increase */
+    total_idle_ticks = get_cpu_idle_time(cpu);
+    tmp_idle_ticks = total_idle_ticks -
+        this_dbs_info->prev_cpu_idle_up;
+    this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+
+    if (tmp_idle_ticks < idle_ticks)
+        idle_ticks = tmp_idle_ticks;

     /* Scale idle ticks by 100 and compare with up and down ticks */
     idle_ticks *= 100;
     up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
-        usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+            usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

     if (idle_ticks < up_idle_ticks) {
-        down_skip[cpu] = 0;
-        for_each_cpu_mask(j, policy->cpus) {
-            struct cpu_dbs_info_s *j_dbs_info;
+        this_dbs_info->down_skip = 0;
+        this_dbs_info->prev_cpu_idle_down =
+            this_dbs_info->prev_cpu_idle_up;

-            j_dbs_info = &per_cpu(cpu_dbs_info, j);
-            j_dbs_info->prev_cpu_idle_down =
-                j_dbs_info->prev_cpu_idle_up;
-        }
         /* if we are already at full speed then break out early */
-        if (requested_freq[cpu] == policy->max)
+        if (this_dbs_info->requested_freq == policy->max)
             return;

         freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
@@ -378,49 +353,45 @@ static void dbs_check_cpu(int cpu)
         if (unlikely(freq_step == 0))
             freq_step = 5;

-        requested_freq[cpu] += freq_step;
-        if (requested_freq[cpu] > policy->max)
-            requested_freq[cpu] = policy->max;
+        this_dbs_info->requested_freq += freq_step;
+        if (this_dbs_info->requested_freq > policy->max)
+            this_dbs_info->requested_freq = policy->max;

-        __cpufreq_driver_target(policy, requested_freq[cpu],
+        __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
             CPUFREQ_RELATION_H);
         return;
     }

     /* Check for frequency decrease */
-    down_skip[cpu]++;
-    if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
+    this_dbs_info->down_skip++;
+    if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
         return;

     idle_ticks = UINT_MAX;
-    for_each_cpu_mask(j, policy->cpus) {
-        unsigned int tmp_idle_ticks, total_idle_ticks;
-        struct cpu_dbs_info_s *j_dbs_info;

-        j_dbs_info = &per_cpu(cpu_dbs_info, j);
-        total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
-        tmp_idle_ticks = total_idle_ticks -
-            j_dbs_info->prev_cpu_idle_down;
-        j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
+    /* Check for frequency decrease */
+    total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
+    tmp_idle_ticks = total_idle_ticks -
+        this_dbs_info->prev_cpu_idle_down;
+    this_dbs_info->prev_cpu_idle_down = total_idle_ticks;

-        if (tmp_idle_ticks < idle_ticks)
-            idle_ticks = tmp_idle_ticks;
-    }
+    if (tmp_idle_ticks < idle_ticks)
+        idle_ticks = tmp_idle_ticks;

     /* Scale idle ticks by 100 and compare with up and down ticks */
     idle_ticks *= 100;
-    down_skip[cpu] = 0;
+    this_dbs_info->down_skip = 0;

     freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
         dbs_tuners_ins.sampling_down_factor;
     down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
-        usecs_to_jiffies(freq_down_sampling_rate);
+            usecs_to_jiffies(freq_down_sampling_rate);

     if (idle_ticks > down_idle_ticks) {
-        /* if we are already at the lowest speed then break out early
+        /*
+         * if we are already at the lowest speed then break out early
          * or if we 'cannot' reduce the speed as the user might want
-         * freq_step to be zero */
-        if (requested_freq[cpu] == policy->min
+         * freq_step to be zero
+         */
+        if (this_dbs_info->requested_freq == policy->min
                 || dbs_tuners_ins.freq_step == 0)
             return;
@@ -430,13 +401,12 @@ static void dbs_check_cpu(int cpu)
         if (unlikely(freq_step == 0))
             freq_step = 5;

-        requested_freq[cpu] -= freq_step;
-        if (requested_freq[cpu] < policy->min)
-            requested_freq[cpu] = policy->min;
+        this_dbs_info->requested_freq -= freq_step;
+        if (this_dbs_info->requested_freq < policy->min)
+            this_dbs_info->requested_freq = policy->min;

-        __cpufreq_driver_target(policy, requested_freq[cpu],
-            CPUFREQ_RELATION_H);
+        __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
+                CPUFREQ_RELATION_H);
         return;
     }
 }
@@ -493,11 +463,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
             j_dbs_info = &per_cpu(cpu_dbs_info, j);
             j_dbs_info->cur_policy = policy;

-            j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+            j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
             j_dbs_info->prev_cpu_idle_down =
                 j_dbs_info->prev_cpu_idle_up;
         }
         this_dbs_info->enable = 1;
+        this_dbs_info->down_skip = 0;
+        this_dbs_info->requested_freq = policy->cur;
         sysfs_create_group(&policy->kobj, &dbs_attr_group);
         dbs_enable++;
         /*
@@ -507,13 +479,16 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
         if (dbs_enable == 1) {
             unsigned int latency;
             /* policy latency is in nS. Convert it to uS first */
+            latency = policy->cpuinfo.transition_latency / 1000;
+            if (latency == 0)
+                latency = 1;

-            latency = policy->cpuinfo.transition_latency;
-            if (latency < 1000)
-                latency = 1000;
-
-            def_sampling_rate = (latency / 1000) *
+            def_sampling_rate = 10 * latency *
                     DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
+
+            if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
+                def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+
             dbs_tuners_ins.sampling_rate = def_sampling_rate;
             dbs_tuners_ins.ignore_nice = 0;
             dbs_tuners_ins.freq_step = 5;
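Most of the conservative-governor delta above comes from moving governor state out of file-scoped static arrays indexed by CPU number (and the one-shot init_flag bootstrap) into the existing per-CPU bookkeeping struct. Below is a minimal userspace sketch of that before/after pattern, not the kernel code itself: the plain array and NR_CPUS value stand in for the kernel's DEFINE_PER_CPU/per_cpu machinery.

#include <stdio.h>

#define NR_CPUS 4

/* Old pattern, removed by this merge: parallel static arrays indexed
 * by cpu number, initialised lazily behind an init_flag. */
static int down_skip_old[NR_CPUS];
static int requested_freq_old[NR_CPUS];

/* New pattern: the state lives in the per-CPU bookkeeping struct
 * itself (a plain array here stands in for DEFINE_PER_CPU/per_cpu). */
struct cpu_dbs_info_s {
    unsigned int down_skip;
    unsigned int requested_freq;
};
static struct cpu_dbs_info_s cpu_dbs_info[NR_CPUS];

int main(void)
{
    int cpu = 1;

    /* old style: two unrelated arrays that must stay in sync */
    down_skip_old[cpu]++;
    requested_freq_old[cpu] = 800000;

    /* new style: one struct per cpu, initialised at governor start */
    cpu_dbs_info[cpu].down_skip = 0;
    cpu_dbs_info[cpu].requested_freq = 800000;

    printf("cpu%d: down_skip=%u requested_freq=%u\n", cpu,
           cpu_dbs_info[cpu].down_skip,
           cpu_dbs_info[cpu].requested_freq);
    return 0;
}

Keeping the fields next to prev_cpu_idle_up/prev_cpu_idle_down also removes the need for the lazy init_flag pass, since the values are set when the governor starts on a policy.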
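The retuned defaults also change how the conservative governor derives its default sampling interval: DEF_SAMPLING_RATE_LATENCY_MULTIPLIER drops from 100000 to 1000, the rate becomes 10 * latency * multiplier with the transition latency converted from nS to uS up front, and the result is floored at the new MIN_STAT_SAMPLING_RATE. A standalone sketch of that arithmetic follows; HZ=250 and the 10000 nS transition latency are illustrative assumptions, not values from this commit.

#include <stdio.h>

/* Mirrors the new macros in cpufreq_conservative.c. HZ is an assumed
 * config value, so jiffies_to_usecs(10) is expanded by hand for it. */
#define HZ                                   250
#define JIFFIES_TO_USECS(x)                  ((x) * (1000000 / HZ))
#define MIN_SAMPLING_RATE_RATIO              (2)
#define MIN_STAT_SAMPLING_RATE               (MIN_SAMPLING_RATE_RATIO * JIFFIES_TO_USECS(10))
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)

int main(void)
{
    unsigned int transition_latency_ns = 10000; /* hypothetical CPU */

    /* policy latency is in nS; convert to uS as the governor now does */
    unsigned int latency = transition_latency_ns / 1000;
    if (latency == 0)
        latency = 1;

    unsigned int def_sampling_rate =
        10 * latency * DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
    if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
        def_sampling_rate = MIN_STAT_SAMPLING_RATE;

    /* 10 uS latency -> 100000 uS, i.e. a 100 ms default sampling rate */
    printf("def_sampling_rate = %u uS\n", def_sampling_rate);
    return 0;
}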
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -84,6 +84,7 @@ struct dbs_tuners {
 static struct dbs_tuners dbs_tuners_ins = {
     .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
     .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
+    .ignore_nice = 0,
 };

 static inline unsigned int get_cpu_idle_time(unsigned int cpu)
@@ -350,6 +351,9 @@ static void dbs_check_cpu(int cpu)
     freq_next = (freq_next * policy->cur) /
             (dbs_tuners_ins.up_threshold - 10);

+    if (freq_next < policy->min)
+        freq_next = policy->min;
+
     if (freq_next <= ((policy->cur * 95) / 100))
         __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
 }
@@ -395,8 +399,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
         return -EINVAL;

     if (policy->cpuinfo.transition_latency >
-            (TRANSITION_LATENCY_LIMIT * 1000))
+            (TRANSITION_LATENCY_LIMIT * 1000)) {
+        printk(KERN_WARNING "ondemand governor failed to load "
+                    "due to too long transition latency\n");
         return -EINVAL;
+    }

     if (this_dbs_info->enable) /* Already enabled */
         break;
@@ -431,8 +438,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
             def_sampling_rate = MIN_STAT_SAMPLING_RATE;

         dbs_tuners_ins.sampling_rate = def_sampling_rate;
-        dbs_tuners_ins.ignore_nice = 0;
-
         dbs_timer_init();
     }
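On the ondemand side, the one functional change in dbs_check_cpu() is the added clamp that keeps the computed freq_next from dropping below policy->min before the existing "within 5% of current" comparison. A userspace sketch of that decision path follows; struct policy and the frequency values are hypothetical stand-ins for struct cpufreq_policy, not kernel API.

#include <stdio.h>

/* Hypothetical stand-in for the relevant struct cpufreq_policy fields. */
struct policy {
    unsigned int min, max, cur; /* kHz */
};

/* Mirrors the frequency-decrease path after this merge: clamp the
 * candidate to policy->min, then only transition if the target is at
 * least 5% below the current speed. */
static unsigned int pick_freq(const struct policy *p, unsigned int freq_next)
{
    if (freq_next < p->min)     /* the newly added lower clamp */
        freq_next = p->min;

    if (freq_next <= ((p->cur * 95) / 100))
        return freq_next;       /* would call __cpufreq_driver_target() */
    return p->cur;              /* too close to cur: no transition */
}

int main(void)
{
    struct policy p = { .min = 800000, .max = 2000000, .cur = 2000000 };

    printf("%u\n", pick_freq(&p, 500000));  /* clamped up to 800000 */
    printf("%u\n", pick_freq(&p, 1950000)); /* within 5%: stays at 2000000 */
    return 0;
}

Without the clamp, a very low utilisation sample could compute a freq_next below the policy's minimum and hand an out-of-range target to the driver; with it, the request is always within [policy->min, policy->cur].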