Commit 5ed0cec0 authored by Lai Jiangshan, committed by Ingo Molnar

sched: TIF_NEED_RESCHED -> need_resched() cleanup

Impact: cleanup

Use test_tsk_need_resched(), set_tsk_need_resched(), need_resched()
instead of using TIF_NEED_RESCHED.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <49B10BA4.9070209@cn.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 7fc07d84
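
For context, the helpers this commit switches to are thin wrappers around the
thread-flag primitives. A minimal sketch of their definitions, modeled on
include/linux/sched.h of that era and shown here only for illustration:

	/*
	 * Illustrative sketch only: approximate definitions of the wrappers
	 * adopted by this commit (not part of the patch itself).
	 */
	static inline void set_tsk_need_resched(struct task_struct *tsk)
	{
		set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
	}

	static inline int test_tsk_need_resched(struct task_struct *tsk)
	{
		return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
	}

	static inline int need_resched(void)
	{
		return unlikely(test_thread_flag(TIF_NEED_RESCHED));
	}

Note that the unlikely() hint is folded into the wrappers, which is why the
explicit unlikely() disappears from the call sites in the diff below.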
@@ -1189,10 +1189,10 @@ static void resched_task(struct task_struct *p)
 	assert_spin_locked(&task_rq(p)->lock);
-	if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+	if (test_tsk_need_resched(p))
 		return;
-	set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+	set_tsk_need_resched(p);
 	cpu = task_cpu(p);
 	if (cpu == smp_processor_id())

@@ -1248,7 +1248,7 @@ void wake_up_idle_cpu(int cpu)
 	 * lockless. The worst case is that the other CPU runs the
 	 * idle task through an additional NOOP schedule()
 	 */
-	set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED);
+	set_tsk_need_resched(rq->idle);
 	/* NEED_RESCHED must be visible before we test polling */
 	smp_mb();

@@ -4740,7 +4740,7 @@ asmlinkage void __sched preempt_schedule(void)
 	 * between schedule and now.
 	 */
 	barrier();
-	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+	} while (need_resched());
 }
 EXPORT_SYMBOL(preempt_schedule);

@@ -4769,7 +4769,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	 * between schedule and now.
 	 */
 	barrier();
-	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+	} while (need_resched());
 }
 #endif /* CONFIG_PREEMPT */

@@ -39,7 +39,7 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
 int __lockfunc __reacquire_kernel_lock(void)
 {
 	while (!_raw_spin_trylock(&kernel_flag)) {
-		if (test_thread_flag(TIF_NEED_RESCHED))
+		if (need_resched())
 			return -EAGAIN;
 		cpu_relax();
 	}