Commit fbdde7bd authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  debug: turn ignore_loglevel into an early param
  sched: remove unused params
  sched: let +nice tasks have smaller impact
  sched: fix high wake up latencies with FAIR_USER_SCHED
  RCU: add help text for "RCU implementation type"
parents e1a9c987 c4772d99
@@ -775,6 +775,14 @@ config PREEMPT_NOTIFIERS
 choice
 	prompt "RCU implementation type:"
 	default CLASSIC_RCU
+	help
+	  This allows you to choose either the classic RCU implementation
+	  that is designed for best read-side performance on non-realtime
+	  systems, or the preemptible RCU implementation for best latency
+	  on realtime systems.  Note that some kernel preemption modes
+	  will restrict your choice.
+
+	  Select the default if you are unsure.
 
 config CLASSIC_RCU
 	bool "Classic RCU"
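The choice above is a build-time switch, so code that cares about the RCU flavour branches on the resulting CONFIG_* symbols. A minimal illustrative sketch, assuming the CONFIG_CLASSIC_RCU and CONFIG_PREEMPT_RCU symbols defined by this choice block; the helper function and message text are hypothetical, not part of the patch:

/* Hypothetical helper: report which RCU flavour was chosen at build time. */
#include <linux/init.h>
#include <linux/kernel.h>

static void __init report_rcu_flavour(void)
{
#ifdef CONFIG_CLASSIC_RCU
	printk(KERN_INFO "RCU: classic (best read-side performance)\n");
#else /* CONFIG_PREEMPT_RCU */
	printk(KERN_INFO "RCU: preemptible (best realtime latency)\n");
#endif
}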
@@ -455,10 +455,10 @@ static int __init ignore_loglevel_setup(char *str)
 	ignore_loglevel = 1;
 	printk(KERN_INFO "debug: ignoring loglevel setting.\n");
 
-	return 1;
+	return 0;
 }
 
-__setup("ignore_loglevel", ignore_loglevel_setup);
+early_param("ignore_loglevel", ignore_loglevel_setup);
 
 /*
  * Write out chars from start to end - 1 inclusive
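Two things change in lockstep here: the registration macro and the handler's return value. A __setup() handler returns 1 to signal that it consumed the option, while an early_param() handler runs much earlier in boot (from parse_early_param()) and returns 0 on success. A minimal sketch of the same pattern for a made-up "debug_foo" parameter; the flag and parameter name are hypothetical:

#include <linux/init.h>

static int debug_foo;	/* hypothetical early flag */

/* early_param() handlers run before most of the kernel is up and
 * return 0 on success (contrast: __setup() handlers return 1 once
 * they have consumed the option). */
static int __init debug_foo_setup(char *str)
{
	debug_foo = 1;
	return 0;
}
early_param("debug_foo", debug_foo_setup);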
@@ -1255,12 +1255,12 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 #define sched_class_highest (&rt_sched_class)
 
-static void inc_nr_running(struct task_struct *p, struct rq *rq)
+static void inc_nr_running(struct rq *rq)
 {
 	rq->nr_running++;
 }
 
-static void dec_nr_running(struct task_struct *p, struct rq *rq)
+static void dec_nr_running(struct rq *rq)
 {
 	rq->nr_running--;
 }
 
@@ -1354,7 +1354,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
-	inc_nr_running(p, rq);
+	inc_nr_running(rq);
 }
 
 /*
@@ -1366,7 +1366,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
-	dec_nr_running(p, rq);
+	dec_nr_running(rq);
 }
 
 /**
@@ -2006,7 +2006,7 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		 * management (if any):
 		 */
 		p->sched_class->task_new(rq, p);
-		inc_nr_running(p, rq);
+		inc_nr_running(rq);
 	}
 	check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
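The two helpers never used their task_struct argument: they only touch rq->nr_running, so dropping the parameter and updating the call sites above cannot change behaviour. A standalone userspace model of the refactored helpers (not kernel code; struct rq is reduced to the one field involved, and the call-site comments name the kernel functions they stand in for):

#include <stdio.h>

struct rq { unsigned long nr_running; };

/* Post-refactor signatures: the runqueue alone is enough. */
static void inc_nr_running(struct rq *rq) { rq->nr_running++; }
static void dec_nr_running(struct rq *rq) { rq->nr_running--; }

int main(void)
{
	struct rq rq = { 0 };

	inc_nr_running(&rq);	/* as in activate_task() */
	inc_nr_running(&rq);	/* as in wake_up_new_task() */
	dec_nr_running(&rq);	/* as in deactivate_task() */
	printf("nr_running = %lu\n", rq.nr_running);	/* prints 1 */
	return 0;
}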
@@ -520,7 +520,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
-		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
+		if (sched_feat(NEW_FAIR_SLEEPERS))
 			vruntime -= sysctl_sched_latency;
 
 		/* ensure we never gain time by being placed backwards. */
@@ -1106,7 +1106,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	}
 
 	gran = sysctl_sched_wakeup_granularity;
-	if (unlikely(se->load.weight != NICE_0_LOAD))
+
+	/*
+	 * More easily preempt - nice tasks, while not making
+	 * it harder for + nice tasks.
+	 */
+	if (unlikely(se->load.weight > NICE_0_LOAD))
 		gran = calc_delta_fair(gran, &se->load);
 
 	if (pse->vruntime + gran < se->vruntime)
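calc_delta_fair() effectively computes delta * NICE_0_LOAD / weight, so for a -nice (heavier) current task the scaled granularity shrinks and a waking task preempts it sooner. With the old != test, a +nice (lighter) current task instead had its granularity inflated, making it harder to preempt; the new > test leaves +nice tasks at the default. A standalone userspace sketch of this asymmetry (not kernel code): the 10ms base granularity is just an example figure, and the weights are the usual prio_to_weight[] values for nice -5, 0, and +5:

#include <stdio.h>

#define NICE_0_LOAD	1024ULL

/* Approximates the patched check_preempt_wakeup() logic: scale the
 * wakeup granularity only for entities heavier than nice 0. */
static unsigned long long
scaled_gran(unsigned long long gran, unsigned long long weight)
{
	if (weight > NICE_0_LOAD)	/* -nice current task: easier to preempt */
		gran = gran * NICE_0_LOAD / weight;
	return gran;			/* +nice tasks keep the default gran */
}

int main(void)
{
	unsigned long long gran = 10000000ULL;	/* 10ms in ns, example value */

	printf("nice -5: %llu ns\n", scaled_gran(gran, 3121));	/* scaled down */
	printf("nice  0: %llu ns\n", scaled_gran(gran, 1024));	/* unchanged */
	printf("nice +5: %llu ns\n", scaled_gran(gran, 335));	/* unchanged now */
	return 0;
}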