Commit c1a280b6 authored by Thomas Gleixner, committed by Ingo Molnar

sched/preempt: Use CONFIG_PREEMPTION where appropriate

CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by
CONFIG_PREEMPT_RT. Both PREEMPT and PREEMPT_RT require the same
functionality which today depends on CONFIG_PREEMPT.

Switch the preemption code, scheduler and init task over to use
CONFIG_PREEMPTION.

That's the first step towards RT in that area. The more complex changes are
coming separately.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20190726212124.117528401@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 2a11c76e
...@@ -78,11 +78,11 @@ static __always_inline bool should_resched(int preempt_offset) ...@@ -78,11 +78,11 @@ static __always_inline bool should_resched(int preempt_offset)
tif_need_resched()); tif_need_resched());
} }
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void); extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule() #define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void); extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace() #define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */ #endif /* CONFIG_PREEMPTION */
#endif /* __ASM_PREEMPT_H */ #endif /* __ASM_PREEMPT_H */
...@@ -182,7 +182,7 @@ do { \ ...@@ -182,7 +182,7 @@ do { \
#define preemptible() (preempt_count() == 0 && !irqs_disabled()) #define preemptible() (preempt_count() == 0 && !irqs_disabled())
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPTION
#define preempt_enable() \ #define preempt_enable() \
do { \ do { \
barrier(); \ barrier(); \
...@@ -203,7 +203,7 @@ do { \ ...@@ -203,7 +203,7 @@ do { \
__preempt_schedule(); \ __preempt_schedule(); \
} while (0) } while (0)
#else /* !CONFIG_PREEMPT */ #else /* !CONFIG_PREEMPTION */
#define preempt_enable() \ #define preempt_enable() \
do { \ do { \
barrier(); \ barrier(); \
...@@ -217,7 +217,7 @@ do { \ ...@@ -217,7 +217,7 @@ do { \
} while (0) } while (0)
#define preempt_check_resched() do { } while (0) #define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPT */ #endif /* CONFIG_PREEMPTION */
#define preempt_disable_notrace() \ #define preempt_disable_notrace() \
do { \ do { \
......
...@@ -1767,7 +1767,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) ...@@ -1767,7 +1767,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
* value indicates whether a reschedule was done in fact. * value indicates whether a reschedule was done in fact.
* cond_resched_lock() will drop the spinlock before scheduling, * cond_resched_lock() will drop the spinlock before scheduling,
*/ */
#ifndef CONFIG_PREEMPT #ifndef CONFIG_PREEMPTION
extern int _cond_resched(void); extern int _cond_resched(void);
#else #else
static inline int _cond_resched(void) { return 0; } static inline int _cond_resched(void) { return 0; }
...@@ -1796,12 +1796,12 @@ static inline void cond_resched_rcu(void) ...@@ -1796,12 +1796,12 @@ static inline void cond_resched_rcu(void)
/* /*
* Does a critical section need to be broken due to another * Does a critical section need to be broken due to another
* task waiting?: (technically does not depend on CONFIG_PREEMPT, * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
* but a general need for low latency) * but a general need for low latency)
*/ */
static inline int spin_needbreak(spinlock_t *lock) static inline int spin_needbreak(spinlock_t *lock)
{ {
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPTION
return spin_is_contended(lock); return spin_is_contended(lock);
#else #else
return 0; return 0;
......
...@@ -174,7 +174,7 @@ struct task_struct init_task ...@@ -174,7 +174,7 @@ struct task_struct init_task
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
.ret_stack = NULL, .ret_stack = NULL,
#endif #endif
#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPT) #if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
.trace_recursion = 0, .trace_recursion = 0,
#endif #endif
#ifdef CONFIG_LIVEPATCH #ifdef CONFIG_LIVEPATCH
......
...@@ -433,7 +433,7 @@ noinline void __ref rest_init(void) ...@@ -433,7 +433,7 @@ noinline void __ref rest_init(void)
/* /*
* Enable might_sleep() and smp_processor_id() checks. * Enable might_sleep() and smp_processor_id() checks.
* They cannot be enabled earlier because with CONFIG_PREEMPT=y * They cannot be enabled earlier because with CONFIG_PREEMPTION=y
* kernel_thread() would trigger might_sleep() splats. With * kernel_thread() would trigger might_sleep() splats. With
* CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled
* already, but it's stuck on the kthreadd_done completion. * already, but it's stuck on the kthreadd_done completion.
......
...@@ -3581,7 +3581,7 @@ static inline void sched_tick_start(int cpu) { } ...@@ -3581,7 +3581,7 @@ static inline void sched_tick_start(int cpu) { }
static inline void sched_tick_stop(int cpu) { } static inline void sched_tick_stop(int cpu) { }
#endif #endif
#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
defined(CONFIG_TRACE_PREEMPT_TOGGLE)) defined(CONFIG_TRACE_PREEMPT_TOGGLE))
/* /*
* If the value passed in is equal to the current preempt count * If the value passed in is equal to the current preempt count
...@@ -3782,7 +3782,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) ...@@ -3782,7 +3782,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
* task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
* called on the nearest possible occasion: * called on the nearest possible occasion:
* *
* - If the kernel is preemptible (CONFIG_PREEMPT=y): * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
* *
* - in syscall or exception context, at the next outmost * - in syscall or exception context, at the next outmost
* preempt_enable(). (this might be as soon as the wake_up()'s * preempt_enable(). (this might be as soon as the wake_up()'s
...@@ -3791,7 +3791,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) ...@@ -3791,7 +3791,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
* - in IRQ context, return from interrupt-handler to * - in IRQ context, return from interrupt-handler to
* preemptible context * preemptible context
* *
* - If the kernel is not preemptible (CONFIG_PREEMPT is not set) * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
* then at the next: * then at the next:
* *
* - cond_resched() call * - cond_resched() call
...@@ -4033,7 +4033,7 @@ static void __sched notrace preempt_schedule_common(void) ...@@ -4033,7 +4033,7 @@ static void __sched notrace preempt_schedule_common(void)
} while (need_resched()); } while (need_resched());
} }
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPTION
/* /*
* this is the entry point to schedule() from in-kernel preemption * this is the entry point to schedule() from in-kernel preemption
* off of preempt_enable. Kernel preemptions off return from interrupt * off of preempt_enable. Kernel preemptions off return from interrupt
...@@ -4105,7 +4105,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) ...@@ -4105,7 +4105,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
} }
EXPORT_SYMBOL_GPL(preempt_schedule_notrace); EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
#endif /* CONFIG_PREEMPT */ #endif /* CONFIG_PREEMPTION */
/* /*
* this is the entry point to schedule() from kernel preemption * this is the entry point to schedule() from kernel preemption
...@@ -5416,7 +5416,7 @@ SYSCALL_DEFINE0(sched_yield) ...@@ -5416,7 +5416,7 @@ SYSCALL_DEFINE0(sched_yield)
return 0; return 0;
} }
#ifndef CONFIG_PREEMPT #ifndef CONFIG_PREEMPTION
int __sched _cond_resched(void) int __sched _cond_resched(void)
{ {
if (should_resched(0)) { if (should_resched(0)) {
...@@ -5433,7 +5433,7 @@ EXPORT_SYMBOL(_cond_resched); ...@@ -5433,7 +5433,7 @@ EXPORT_SYMBOL(_cond_resched);
* __cond_resched_lock() - if a reschedule is pending, drop the given lock, * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
* call schedule, and on return reacquire the lock. * call schedule, and on return reacquire the lock.
* *
* This works OK both with and without CONFIG_PREEMPT. We do strange low-level * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
* operations here to prevent schedule() from being called twice (once via * operations here to prevent schedule() from being called twice (once via
* spin_unlock(), once by hand). * spin_unlock(), once by hand).
*/ */
......
...@@ -7430,7 +7430,7 @@ static int detach_tasks(struct lb_env *env) ...@@ -7430,7 +7430,7 @@ static int detach_tasks(struct lb_env *env)
detached++; detached++;
env->imbalance -= load; env->imbalance -= load;
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPTION
/* /*
* NEWIDLE balancing is a source of latency, so preemptible * NEWIDLE balancing is a source of latency, so preemptible
* kernels will stop after the first task is detached to minimize * kernels will stop after the first task is detached to minimize
......
...@@ -1943,7 +1943,7 @@ unsigned long arch_scale_freq_capacity(int cpu) ...@@ -1943,7 +1943,7 @@ unsigned long arch_scale_freq_capacity(int cpu)
#endif #endif
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPTION
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2); static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
...@@ -1995,7 +1995,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) ...@@ -1995,7 +1995,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
return ret; return ret;
} }
#endif /* CONFIG_PREEMPT */ #endif /* CONFIG_PREEMPTION */
/* /*
* double_lock_balance - lock the busiest runqueue, this_rq is locked already. * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment