Commit d86ee480 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Linus Torvalds

sched: optimize cond_resched()

Optimize cond_resched() by removing one conditional.

Currently cond_resched() checks system_state ==
SYSTEM_RUNNING in order to avoid scheduling before the
scheduler is running.

We can, however, as Matt suggested, use
PREEMPT_ACTIVE to accomplish the very same thing.
Suggested-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c99e6efe
...@@ -501,8 +501,11 @@ struct task_cputime { ...@@ -501,8 +501,11 @@ struct task_cputime {
/* /*
* Disable preemption until the scheduler is running. * Disable preemption until the scheduler is running.
* Reset by start_kernel()->sched_init()->init_idle(). * Reset by start_kernel()->sched_init()->init_idle().
*
* We include PREEMPT_ACTIVE to avoid cond_resched() from working
* before the scheduler is active -- see should_resched().
*/ */
#define INIT_PREEMPT_COUNT (1) #define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE)
/** /**
* struct thread_group_cputimer - thread group interval timer counts * struct thread_group_cputimer - thread group interval timer counts
......
...@@ -6541,6 +6541,11 @@ SYSCALL_DEFINE0(sched_yield) ...@@ -6541,6 +6541,11 @@ SYSCALL_DEFINE0(sched_yield)
return 0; return 0;
} }
/*
 * True when a reschedule is pending and we are not already inside an
 * active preemption (PREEMPT_ACTIVE set in the preempt count).  Because
 * INIT_PREEMPT_COUNT includes PREEMPT_ACTIVE, this also stays false
 * until the scheduler is initialized.
 */
static inline int should_resched(void)
{
	if (!need_resched())
		return 0;

	return !(preempt_count() & PREEMPT_ACTIVE);
}
static void __cond_resched(void) static void __cond_resched(void)
{ {
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
...@@ -6560,8 +6565,7 @@ static void __cond_resched(void) ...@@ -6560,8 +6565,7 @@ static void __cond_resched(void)
int __sched _cond_resched(void) int __sched _cond_resched(void)
{ {
if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) && if (should_resched()) {
system_state == SYSTEM_RUNNING) {
__cond_resched(); __cond_resched();
return 1; return 1;
} }
...@@ -6579,12 +6583,12 @@ EXPORT_SYMBOL(_cond_resched); ...@@ -6579,12 +6583,12 @@ EXPORT_SYMBOL(_cond_resched);
*/ */
int cond_resched_lock(spinlock_t *lock) int cond_resched_lock(spinlock_t *lock)
{ {
int resched = need_resched() && system_state == SYSTEM_RUNNING; int resched = should_resched();
int ret = 0; int ret = 0;
if (spin_needbreak(lock) || resched) { if (spin_needbreak(lock) || resched) {
spin_unlock(lock); spin_unlock(lock);
if (resched && need_resched()) if (resched)
__cond_resched(); __cond_resched();
else else
cpu_relax(); cpu_relax();
...@@ -6599,7 +6603,7 @@ int __sched cond_resched_softirq(void) ...@@ -6599,7 +6603,7 @@ int __sched cond_resched_softirq(void)
{ {
BUG_ON(!in_softirq()); BUG_ON(!in_softirq());
if (need_resched() && system_state == SYSTEM_RUNNING) { if (should_resched()) {
local_bh_enable(); local_bh_enable();
__cond_resched(); __cond_resched();
local_bh_disable(); local_bh_disable();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment