Commit fc13aeba authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

sched/core: Add preempt argument to __schedule()

There is only a single PREEMPT_ACTIVE use in the regular __schedule()
path and that is to circumvent the task->state check. Since the code
setting PREEMPT_ACTIVE is the immediate caller of __schedule() we can
replace this with a function argument.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 609ca066
@@ -3056,7 +3056,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
  *
  * WARNING: must be called with preemption disabled!
  */
-static void __sched __schedule(void)
+static void __sched __schedule(bool preempt)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
@@ -3096,7 +3096,7 @@ static void __sched __schedule(void)
 	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
 	switch_count = &prev->nivcsw;
-	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+	if (!preempt && prev->state) {
 		if (unlikely(signal_pending_state(prev->state, prev))) {
 			prev->state = TASK_RUNNING;
 		} else {
@@ -3161,7 +3161,7 @@ asmlinkage __visible void __sched schedule(void)
 	sched_submit_work(tsk);
 	do {
 		preempt_disable();
-		__schedule();
+		__schedule(false);
 		sched_preempt_enable_no_resched();
 	} while (need_resched());
 }
@@ -3202,7 +3202,7 @@ static void __sched notrace preempt_schedule_common(void)
 {
 	do {
 		preempt_active_enter();
-		__schedule();
+		__schedule(true);
 		preempt_active_exit();
 		/*
@@ -3267,7 +3267,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 	 * an infinite recursion.
 	 */
 	prev_ctx = exception_enter();
-	__schedule();
+	__schedule(true);
 	exception_exit(prev_ctx);
 	barrier();
@@ -3296,7 +3296,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
 	do {
 		preempt_active_enter();
 		local_irq_enable();
-		__schedule();
+		__schedule(true);
 		local_irq_disable();
 		preempt_active_exit();
 	} while (need_resched());
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment