Commit 246d86b5 authored by Oleg Nesterov, committed by Ingo Molnar

sched: Simplify the reacquire_kernel_lock() logic

- Contrary to what 6d558c3a says, there is no need to reload
  prev = rq->curr after the context switch. You always schedule
  back to where you came from; prev must be equal to current
  even if the cpu/rq has changed (see the sketch after this list).

- This also means reacquire_kernel_lock() can use prev instead
  of current.

- No need to reassign switch_count if reacquire_kernel_lock()
  reports need_resched(); we can just move the initial assignment
  down, under the "need_resched_nonpreemptible:" label.

- Try to update the comment after context_switch().
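
To make the first point concrete, below is a minimal user-space sketch
of the argument. It is an analogy, not kernel code: swapcontext() from
the (obsolescent but still available) POSIX ucontext API stands in for
context_switch(), and the hypothetical current_ctx pointer stands in
for rq->curr. Because a switched-out context is always resumed on its
own stack, a stack-local such as prev is restored exactly as it was
saved, and it necessarily matches "current" again:

	#include <stdio.h>
	#include <ucontext.h>

	static ucontext_t ctx_main, ctx_a, ctx_b;
	static ucontext_t *current_ctx;		/* analogue of rq->curr */
	static char stk_a[64 * 1024], stk_b[64 * 1024];

	static void task_a(void)
	{
		ucontext_t *prev = current_ctx;	/* stack-local, like prev in schedule() */

		current_ctx = &ctx_b;
		swapcontext(&ctx_a, &ctx_b);	/* analogue of context_switch() */

		/*
		 * Resumed on A's own stack: prev was never reloaded, yet it
		 * still equals current_ctx, because the only way back here is
		 * for the "scheduler" (task B below) to pick A again.
		 */
		printf("A resumed: prev == current? %s\n",
		       prev == current_ctx ? "yes" : "no");
		swapcontext(&ctx_a, &ctx_main);
	}

	static void task_b(void)
	{
		current_ctx = &ctx_a;		/* schedule back to A ... */
		swapcontext(&ctx_b, &ctx_a);	/* ... restoring A's stack */
	}

	int main(void)
	{
		getcontext(&ctx_a);
		ctx_a.uc_stack.ss_sp = stk_a;
		ctx_a.uc_stack.ss_size = sizeof(stk_a);
		ctx_a.uc_link = &ctx_main;
		makecontext(&ctx_a, task_a, 0);

		getcontext(&ctx_b);
		ctx_b.uc_stack.ss_sp = stk_b;
		ctx_b.uc_stack.ss_size = sizeof(stk_b);
		ctx_b.uc_link = &ctx_main;
		makecontext(&ctx_b, task_b, 0);

		current_ctx = &ctx_a;
		swapcontext(&ctx_main, &ctx_a);	/* "run" task A */
		return 0;
	}

Compiled and run, this prints "A resumed: prev == current? yes"; the
same reasoning is why schedule() never needs to reload prev after
context_switch().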
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100519125711.GA30199@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c676329a
@@ -3636,7 +3636,6 @@ asmlinkage void __sched schedule(void)
 	rq = cpu_rq(cpu);
 	rcu_note_context_switch(cpu);
 	prev = rq->curr;
-	switch_count = &prev->nivcsw;
 
 	release_kernel_lock(prev);
 need_resched_nonpreemptible:
@@ -3649,6 +3648,7 @@ asmlinkage void __sched schedule(void)
 	raw_spin_lock_irq(&rq->lock);
 	clear_tsk_need_resched(prev);
 
+	switch_count = &prev->nivcsw;
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely(signal_pending_state(prev->state, prev))) {
 			prev->state = TASK_RUNNING;
@@ -3689,8 +3689,10 @@ asmlinkage void __sched schedule(void)
 		context_switch(rq, prev, next); /* unlocks the rq */
 
 		/*
-		 * the context switch might have flipped the stack from under
-		 * us, hence refresh the local variables.
+		 * The context switch has flipped the stack from under us
+		 * and restored the local variables which were saved when
+		 * this task called schedule() in the past. prev == current
+		 * is still correct, but it can be moved to another cpu/rq.
		 */
 		cpu = smp_processor_id();
 		rq = cpu_rq(cpu);
@@ -3699,11 +3701,8 @@ asmlinkage void __sched schedule(void)
 
 	post_schedule(rq);
 
-	if (unlikely(reacquire_kernel_lock(current) < 0)) {
-		prev = rq->curr;
-		switch_count = &prev->nivcsw;
+	if (unlikely(reacquire_kernel_lock(prev)))
 		goto need_resched_nonpreemptible;
-	}
 
 	preempt_enable_no_resched();
 	if (need_resched())