Commit 967fc046 authored by Gregory Haskins

sched: add sched_class->needs_post_schedule() member

We currently run class->post_schedule() outside of the rq->lock, which
means that we need to test for the need to post_schedule outside of
the lock to avoid a forced reacquisition.  This is currently not a problem
as we only look at rq->rt.overloaded.  However, we want to enhance this
going forward to look at more state, to reduce the need to post_schedule to
a bare minimum set.  Therefore, we introduce a new member function,
needs_post_schedule(), which tests for the post_schedule condition without
actually performing the work.  It is therefore safe to call this
function before the rq->lock is released, because we are guaranteed not
to drop the lock at an intermediate point (such as what post_schedule()
may do).
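
In outline, this is a test-under-lock / act-after-unlock pattern: evaluate a
cheap predicate while the lock is held, then perform the (possibly
lock-dropping) work only after the lock has been released.  Below is a minimal
userspace C sketch of just that pattern, not the kernel code itself; the
balancer struct, the needs_rebalance()/rebalance() names, and the pthread
mutex standing in for rq->lock are all illustrative assumptions.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for a runqueue; 'overloaded' mirrors rq->rt.overloaded. */
struct balancer {
	pthread_mutex_t lock;
	int overloaded;
};

/* Cheap predicate: must be called with b->lock held, and never drops it. */
static int needs_rebalance(struct balancer *b)
{
	return b->overloaded ? 1 : 0;
}

/* Expensive work: free to take and drop locks internally. */
static void rebalance(struct balancer *b)
{
	pthread_mutex_lock(&b->lock);
	printf("pushing tasks to other workers\n");
	pthread_mutex_unlock(&b->lock);
}

static void finish_switch(struct balancer *b)
{
	int post_work;

	pthread_mutex_lock(&b->lock);
	post_work = needs_rebalance(b);	/* decide while still locked */
	pthread_mutex_unlock(&b->lock);

	if (post_work)			/* act outside the lock */
		rebalance(b);
}

int main(void)
{
	struct balancer b = { PTHREAD_MUTEX_INITIALIZER, 1 };

	finish_switch(&b);
	return 0;
}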

We will use this later in the series.

[ rostedt: removed paranoid BUG_ON ]
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
parent 8f45e2b5
include/linux/sched.h
@@ -1012,6 +1012,7 @@ struct sched_class {
 			struct rq *busiest, struct sched_domain *sd,
 			enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+	int (*needs_post_schedule) (struct rq *this_rq);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
kernel/sched.c
@@ -2621,6 +2621,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 {
 	struct mm_struct *mm = rq->prev_mm;
 	long prev_state;
+#ifdef CONFIG_SMP
+	int post_schedule = 0;
+
+	if (current->sched_class->needs_post_schedule)
+		post_schedule = current->sched_class->needs_post_schedule(rq);
+#endif
 	rq->prev_mm = NULL;
@@ -2639,7 +2645,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	finish_arch_switch(prev);
 	finish_lock_switch(rq, prev);
 #ifdef CONFIG_SMP
-	if (current->sched_class->post_schedule)
+	if (post_schedule)
 		current->sched_class->post_schedule(rq);
 #endif
kernel/sched_rt.c
@@ -1290,20 +1290,23 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 	pull_rt_task(rq);
 }
 
+/*
+ * assumes rq->lock is held
+ */
+static int needs_post_schedule_rt(struct rq *rq)
+{
+	return rq->rt.overloaded ? 1 : 0;
+}
+
 static void post_schedule_rt(struct rq *rq)
 {
 	/*
-	 * If we have more than one rt_task queued, then
-	 * see if we can push the other rt_tasks off to other CPUS.
-	 * Note we may release the rq lock, and since
-	 * the lock was owned by prev, we need to release it
-	 * first via finish_lock_switch and then reaquire it here.
+	 * This is only called if needs_post_schedule_rt() indicates that
+	 * we need to push tasks away
 	 */
-	if (unlikely(rq->rt.overloaded)) {
-		spin_lock_irq(&rq->lock);
-		push_rt_tasks(rq);
-		spin_unlock_irq(&rq->lock);
-	}
+	spin_lock_irq(&rq->lock);
+	push_rt_tasks(rq);
+	spin_unlock_irq(&rq->lock);
 }
 
 /*
@@ -1557,6 +1560,7 @@ static const struct sched_class rt_sched_class = {
 	.rq_online = rq_online_rt,
 	.rq_offline = rq_offline_rt,
 	.pre_schedule = pre_schedule_rt,
+	.needs_post_schedule = needs_post_schedule_rt,
 	.post_schedule = post_schedule_rt,
 	.task_wake_up = task_wake_up_rt,
 	.switched_from = switched_from_rt,