Commit ba74c144 authored by Thomas Gleixner, committed by Ingo Molnar

sched/rt: Document scheduler related skip-resched-check sites

Create a distinction between scheduler related preempt_enable_no_resched()
calls and the nearly one hundred other places in the kernel that do not
want to reschedule, for one reason or another.

This distinction matters for -rt, where the scheduler and the non-scheduler
preempt models (and checks) are different. For upstream it's purely
documentational.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/n/tip-gs88fvx2mdv5psnzxnv575ke@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent bd2f5536
...@@ -102,7 +102,7 @@ void cpu_idle(void) ...@@ -102,7 +102,7 @@ void cpu_idle(void)
rcu_idle_exit(); rcu_idle_exit();
tick_nohz_idle_exit(); tick_nohz_idle_exit();
if (cpu_should_die()) { if (cpu_should_die()) {
preempt_enable_no_resched(); sched_preempt_enable_no_resched();
cpu_die(); cpu_die();
} }
schedule_preempt_disabled(); schedule_preempt_disabled();
......
...@@ -106,7 +106,7 @@ void cpu_idle(void) ...@@ -106,7 +106,7 @@ void cpu_idle(void)
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(cpu)) { if (cpu_is_offline(cpu)) {
preempt_enable_no_resched(); sched_preempt_enable_no_resched();
cpu_play_dead(); cpu_play_dead();
} }
#endif #endif
......
...@@ -48,12 +48,14 @@ do { \ ...@@ -48,12 +48,14 @@ do { \
barrier(); \ barrier(); \
} while (0) } while (0)
#define preempt_enable_no_resched() \ #define sched_preempt_enable_no_resched() \
do { \ do { \
barrier(); \ barrier(); \
dec_preempt_count(); \ dec_preempt_count(); \
} while (0) } while (0)
#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
#define preempt_enable() \ #define preempt_enable() \
do { \ do { \
preempt_enable_no_resched(); \ preempt_enable_no_resched(); \
...@@ -92,6 +94,7 @@ do { \ ...@@ -92,6 +94,7 @@ do { \
#else /* !CONFIG_PREEMPT_COUNT */ #else /* !CONFIG_PREEMPT_COUNT */
#define preempt_disable() do { } while (0) #define preempt_disable() do { } while (0)
#define sched_preempt_enable_no_resched() do { } while (0)
#define preempt_enable_no_resched() do { } while (0) #define preempt_enable_no_resched() do { } while (0)
#define preempt_enable() do { } while (0) #define preempt_enable() do { } while (0)
......
...@@ -3220,7 +3220,7 @@ static void __sched __schedule(void) ...@@ -3220,7 +3220,7 @@ static void __sched __schedule(void)
post_schedule(rq); post_schedule(rq);
preempt_enable_no_resched(); sched_preempt_enable_no_resched();
if (need_resched()) if (need_resched())
goto need_resched; goto need_resched;
} }
...@@ -3253,7 +3253,7 @@ EXPORT_SYMBOL(schedule); ...@@ -3253,7 +3253,7 @@ EXPORT_SYMBOL(schedule);
*/ */
void __sched schedule_preempt_disabled(void) void __sched schedule_preempt_disabled(void)
{ {
preempt_enable_no_resched(); sched_preempt_enable_no_resched();
schedule(); schedule();
preempt_disable(); preempt_disable();
} }
...@@ -4486,7 +4486,7 @@ SYSCALL_DEFINE0(sched_yield) ...@@ -4486,7 +4486,7 @@ SYSCALL_DEFINE0(sched_yield)
__release(rq->lock); __release(rq->lock);
spin_release(&rq->lock.dep_map, 1, _THIS_IP_); spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
do_raw_spin_unlock(&rq->lock); do_raw_spin_unlock(&rq->lock);
preempt_enable_no_resched(); sched_preempt_enable_no_resched();
schedule(); schedule();
......
...@@ -353,7 +353,7 @@ void irq_exit(void) ...@@ -353,7 +353,7 @@ void irq_exit(void)
tick_nohz_irq_exit(); tick_nohz_irq_exit();
#endif #endif
rcu_irq_exit(); rcu_irq_exit();
preempt_enable_no_resched(); sched_preempt_enable_no_resched();
} }
/* /*
...@@ -759,7 +759,7 @@ static int run_ksoftirqd(void * __bind_cpu) ...@@ -759,7 +759,7 @@ static int run_ksoftirqd(void * __bind_cpu)
if (local_softirq_pending()) if (local_softirq_pending())
__do_softirq(); __do_softirq();
local_irq_enable(); local_irq_enable();
preempt_enable_no_resched(); sched_preempt_enable_no_resched();
cond_resched(); cond_resched();
preempt_disable(); preempt_disable();
rcu_note_context_switch((long)__bind_cpu); rcu_note_context_switch((long)__bind_cpu);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment