Commit a7a4f8a7 authored by Paul Turner, committed by Ingo Molnar

sched: Add sysctl_sched_shares_window

Introduce a new sysctl for the shares window and disambiguate it from
sched_time_avg.

A 10ms window appears to be a good compromise between accuracy and performance.
Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234938.112173964@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 67e86250
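
The table entry added below registers the knob in kern_table, so once this patch is applied the window should be readable and writable as /proc/sys/kernel/sched_shares_window (kern_table entries are exposed under /proc/sys/kernel/). A minimal userspace sketch for inspecting the value, assuming that path; nothing in it is part of the patch itself:

/* Illustrative only: read the shares window (in nanoseconds). */
#include <stdio.h>

int main(void)
{
	unsigned int window_ns;
	FILE *f = fopen("/proc/sys/kernel/sched_shares_window", "r");

	if (!f || fscanf(f, "%u", &window_ns) != 1) {
		perror("sched_shares_window");
		return 1;
	}
	fclose(f);
	/* default from this patch: 10000000 ns == 10 ms */
	printf("shares window: %u ns (%u ms)\n", window_ns, window_ns / 1000000);
	return 0;
}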
@@ -1900,6 +1900,7 @@ extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
 extern unsigned int sysctl_sched_time_avg;
 extern unsigned int sysctl_timer_migration;
+extern unsigned int sysctl_sched_shares_window;

 int sched_proc_update_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *length,
...
@@ -89,6 +89,13 @@ unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

+/*
+ * The exponential sliding window over which load is averaged for shares
+ * distribution.
+ * (default: 10msec)
+ */
+unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
+
 static const struct sched_class fair_sched_class;

 /**************************************************************
...
@@ -688,7 +695,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
 static void update_cfs_load(struct cfs_rq *cfs_rq)
 {
-	u64 period = sched_avg_period();
+	u64 period = sysctl_sched_shares_window;
 	u64 now, delta;
 	unsigned long load = cfs_rq->load.weight;
...
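
The hunk above makes update_cfs_load() use the new window length directly instead of sched_avg_period(). As a rough illustration of the "exponential sliding window" idea described in the comment added earlier, the sketch below decays an accumulated average once per elapsed window; the names (decay_load_avg()) and the halving factor are hypothetical and this is not the kernel's actual accounting code:

typedef unsigned long long u64;	/* stand-in for the kernel's u64 */

/* Hypothetical sketch: halve the accumulated load average once per
 * full window that has elapsed, so older load contributes
 * exponentially less to the running average. */
static void decay_load_avg(u64 *load_avg, u64 *window_start,
			   u64 now, u64 window)
{
	while (now - *window_start >= window) {
		*load_avg >>= 1;		/* decay per elapsed window */
		*window_start += window;	/* advance window boundary */
	}
}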
@@ -332,6 +332,13 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "sched_shares_window",
+		.data		= &sysctl_sched_shares_window,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 	{
 		.procname	= "timer_migration",
 		.data		= &sysctl_timer_migration,
...