Commit 3d8f74dd authored by Peter Zijlstra, committed by Ingo Molnar

sched/core: Stop setting PREEMPT_ACTIVE

Now that nothing tests for PREEMPT_ACTIVE anymore, stop setting it.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c73464b1
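
For context, PREEMPT_ACTIVE was a high bit in the per-task preempt count that marked "we are scheduling because of a preemption", letting __schedule() keep a preempted but not-TASK_RUNNING task on the runqueue. With the earlier patches in this series passing that fact as __schedule(true), only the plain disable offset is still needed. Below is a minimal user-space sketch of the accounting difference; the PREEMPT_ACTIVE bit value is an assumption modeled on the v4.2-era include/linux/preempt.h, not something this patch defines.

/* sketch.c - toy model of the old vs. new preempt accounting.
 * PREEMPT_ACTIVE's bit position is an assumption (one bit above
 * NMI_MASK in that era's layout); nothing here is kernel code. */
#include <assert.h>
#include <stdio.h>

#define PREEMPT_ACTIVE          0x00200000U /* assumed bit position */
#define PREEMPT_DISABLE_OFFSET  1U          /* one preempt_disable() level */

static unsigned int preempt_count;

int main(void)
{
	/* Old scheme: flag the preemption in the count itself. */
	preempt_count += PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET;
	printf("old scheme inside __schedule(): %#x\n", preempt_count);
	preempt_count -= PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET;

	/* New scheme: plain disable; __schedule(true) carries the flag. */
	preempt_count += PREEMPT_DISABLE_OFFSET;
	printf("new scheme inside __schedule(): %#x\n", preempt_count);
	preempt_count -= PREEMPT_DISABLE_OFFSET;

	assert(preempt_count == 0);
	return 0;
}
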
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -146,18 +146,6 @@ extern void preempt_count_sub(int val);
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
-#define preempt_active_enter() \
-do { \
-	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
-	barrier(); \
-} while (0)
-
-#define preempt_active_exit() \
-do { \
-	barrier(); \
-	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
-} while (0)
-
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
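
Note that the removed helpers paired every count update with barrier() so the compiler could not move critical-section code across the boundary. The primitives the hunks below switch to follow the same discipline, minus the PREEMPT_ACTIVE term. A compilable sketch of that shape; the macro bodies are approximations of that era's preempt.h with user-space stubs, not verbatim kernel code:

/* macros.c - shape of the surviving primitives this patch switches to. */
#include <stdio.h>

static unsigned int preempt_count;
#define barrier()            __asm__ __volatile__("" ::: "memory")
#define preempt_count_inc()  (preempt_count++)
#define preempt_count_dec()  (preempt_count--)

/* Same ordering as the removed preempt_active_enter()/_exit():
 * bump the count, then a compiler barrier; barrier, then drop it. */
#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

int main(void)
{
	preempt_disable();
	printf("count while disabled: %u\n", preempt_count);
	sched_preempt_enable_no_resched();
	return 0;
}
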
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3201,9 +3201,9 @@ void __sched schedule_preempt_disabled(void)
 static void __sched notrace preempt_schedule_common(void)
 {
 	do {
-		preempt_active_enter();
+		preempt_disable();
 		__schedule(true);
-		preempt_active_exit();
+		sched_preempt_enable_no_resched();
 
 		/*
 		 * Check again in case we missed a preemption opportunity
@@ -3254,13 +3254,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 		return;
 
 	do {
-		/*
-		 * Use raw __prempt_count() ops that don't call function.
-		 * We can't call functions before disabling preemption which
-		 * disarm preemption tracing recursions.
-		 */
-		__preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
-		barrier();
+		preempt_disable_notrace();
 		/*
 		 * Needs preempt disabled in case user_exit() is traced
 		 * and the tracer calls preempt_enable_notrace() causing
@@ -3270,8 +3264,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 		__schedule(true);
 		exception_exit(prev_ctx);
 
-		barrier();
-		__preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
+		preempt_enable_no_resched_notrace();
 	} while (need_resched());
 }
 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
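
The comment deleted above states the constraint these two hunks preserve: function tracing must not be re-entered while flipping the preempt count, or the tracer's own preempt toggling recurses. The _notrace variants satisfy that by staying out of the function tracer's sight. A toy model of the recursion being avoided, with invented names that only stand in for the real ftrace machinery:

/* notrace.c - why the tracer path must not call traced helpers. */
#include <stdio.h>

static int depth;

static void tracer_cb(void);

/* a "traced" helper: every call first notifies the tracer */
static void preempt_disable_traced(void)
{
	tracer_cb();
}

/* the "_notrace" stand-in: plain operation, invisible to the tracer */
static void preempt_disable_notrace_demo(void) { }

static void tracer_cb(void)
{
	if (++depth > 3) {          /* cap the demo's runaway recursion */
		printf("recursion! depth=%d\n", depth);
		depth--;
		return;
	}
	preempt_disable_traced();   /* tracer uses a traced helper: boom */
	depth--;
}

int main(void)
{
	preempt_disable_traced();       /* recurses until the cap trips */
	preempt_disable_notrace_demo(); /* no tracer entry, no recursion */
	return 0;
}
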
@@ -3294,11 +3287,11 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
 	prev_state = exception_enter();
 
 	do {
-		preempt_active_enter();
+		preempt_disable();
 		local_irq_enable();
 		__schedule(true);
 		local_irq_disable();
-		preempt_active_exit();
+		sched_preempt_enable_no_resched();
 	} while (need_resched());
 
 	exception_exit(prev_state);
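
All three call sites share the same retry loop: the no-resched enable variants deliberately skip the resched check, so the loop re-tests need_resched() itself in case a preemption request arrived while preemption was disabled. A sketch of that shape, with need_resched() faked by a countdown for the demo:

/* loop.c - shape of the disable/schedule/enable/re-check loop. */
#include <stdbool.h>
#include <stdio.h>

static int pending = 2;

static bool need_resched(void) { return pending > 0; }

static void __schedule(bool preempt)
{
	printf("__schedule(preempt=%d), pending=%d\n", preempt, pending);
	pending--;
}

int main(void)
{
	do {
		/* preempt_disable(); */
		__schedule(true);
		/* sched_preempt_enable_no_resched(): no resched check, */
	} while (need_resched());    /* so the loop re-checks explicitly */
	return 0;
}
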