Commit 3365d167 authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] preempt cleanup

This is another piece of generic fallout from the voluntary-preempt patchset: a
cleanup of the cond_resched() infrastructure, in preparation for the latency
reduction patches.  The changes:

 - uninline cond_resched() - this makes the footprint smaller,
   especially once the number of cond_resched() points increases.

 - add a 'was rescheduled' return value to cond_resched(). This makes it
   symmetric to cond_resched_lock(), and the later latency reduction patches
   rely on the ability to tell whether there was any preemption (see the
   first sketch below).

 - make cond_resched() more robust by using the same mechanism as
   preempt_schedule(): setting PREEMPT_ACTIVE. This preserves the task's
   state - e.g. if the task is in TASK_ZOMBIE but gets preempted via
   cond_resched() just prior to scheduling off, then this approach preserves
   TASK_ZOMBIE.

 - the patch also adds need_lockbreak(), which critical sections can use
   to detect lock-break requests (see the second sketch below).
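
First sketch (illustration only, not part of the patch; the function and
variable names are made up): a long-running loop can now use the return
value of cond_resched() to notice whether it actually gave up the CPU:

#include <linux/sched.h>

/*
 * Hypothetical example: count how often cond_resched() really
 * rescheduled while processing a batch of items.
 */
static unsigned long example_process_items(unsigned long nr_items)
{
	unsigned long i, voluntary_switches = 0;

	for (i = 0; i < nr_items; i++) {
		/* ... per-item work ... */

		/* returns 1 if we were rescheduled, 0 otherwise */
		voluntary_switches += cond_resched();
	}
	return voluntary_switches;
}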
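
Second sketch (again illustration only, with made-up names): a critical
section that honours a lock-break request by dropping the lock when
another task is spinning on it or a reschedule is pending:

#include <linux/sched.h>
#include <linux/spinlock.h>

/*
 * Hypothetical example: break a spinlocked scan when need_lockbreak()
 * reports a waiter or a reschedule is due.
 */
static void example_locked_scan(spinlock_t *example_lock, int nr_items)
{
	int i;

	spin_lock(example_lock);
	for (i = 0; i < nr_items; i++) {
		/* ... per-item work under example_lock ... */

		if (need_lockbreak(example_lock) || need_resched()) {
			/* let the waiter in, and schedule if asked to */
			spin_unlock(example_lock);
			cond_resched();
			spin_lock(example_lock);
		}
	}
	spin_unlock(example_lock);
}
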
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 38e387ee
@@ -66,7 +66,7 @@
 # define preemptible() (preempt_count() == 0 && !irqs_disabled())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
-# define in_atomic() (preempt_count() != 0)
+# define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
 # define preemptible() 0
 # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
@@ -1065,15 +1065,24 @@ static inline int need_resched(void)
 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
 }
-extern void __cond_resched(void);
-static inline void cond_resched(void)
-{
-	if (need_resched())
-		__cond_resched();
-}
+/*
+ * cond_resched() and cond_resched_lock(): latency reduction via
+ * explicit rescheduling in places that are safe. The return
+ * value indicates whether a reschedule was done in fact.
+ */
+extern int cond_resched(void);
 extern int cond_resched_lock(spinlock_t * lock);
+/*
+ * Does a critical section need to be broken due to another
+ * task waiting?:
+ */
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+# define need_lockbreak(lock) ((lock)->break_lock)
+#else
+# define need_lockbreak(lock) 0
+#endif
 /* Reevaluate whether the task has signals pending delivery.
    This is required every time the blocked sigset_t changes.
    callers must hold sighand->siglock. */
@@ -3433,13 +3433,25 @@ asmlinkage long sys_sched_yield(void)
 	return 0;
 }
-void __sched __cond_resched(void)
+static inline void __cond_resched(void)
 {
-	set_current_state(TASK_RUNNING);
-	schedule();
+	do {
+		preempt_count() += PREEMPT_ACTIVE;
+		schedule();
+		preempt_count() -= PREEMPT_ACTIVE;
+	} while (need_resched());
 }
-EXPORT_SYMBOL(__cond_resched);
+int __sched cond_resched(void)
+{
+	if (need_resched()) {
+		__cond_resched();
+		return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(cond_resched);
 /*
  * cond_resched_lock() - if a reschedule is pending, drop the given lock,
@@ -3462,8 +3474,7 @@ int cond_resched_lock(spinlock_t * lock)
 	if (need_resched()) {
 		_raw_spin_unlock(lock);
 		preempt_enable_no_resched();
-		set_current_state(TASK_RUNNING);
-		schedule();
+		__cond_resched();
 		spin_lock(lock);
 		return 1;
 	}