Commit 6991436c authored by Thomas Gleixner, committed by Ingo Molnar

sched/core: Provide a scheduling point for RT locks

RT enabled kernels substitute spin/rwlocks with 'sleeping' variants based
on rtmutexes. Blocking on such a lock is similar to a preemption with respect to:

 - I/O scheduling and worker handling, because these functions might block
   on another substituted lock, or come from a lock contention within these
   functions.

 - RCU, which considers this like a preemption, because the task might be in a
   read-side critical section.

Add a separate scheduling point for this, and hand a new scheduling mode
argument to __schedule() which, together with separate mode masks, allows
this to be handled gracefully from within the scheduler, without
proliferating it to other subsystems like RCU.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211302.372319055@linutronix.de
parent b4bfa3fc
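The hunks excerpted below add the declaration in include/linux/sched.h plus the mode masks and the new entry point in kernel/sched/core.c. The conversion of __schedule(bool preempt) to __schedule(unsigned int sched_mode) is not excerpted; as orientation, the entry points map onto the modes roughly as in this simplified sketch (the real functions contain retry loops, preempt-count handling, work submission and tracing that are omitted here):

/*
 * Orientation sketch (not an excerpted hunk, heavily simplified): after
 * this patch every path into the core scheduler passes one of the SM_*
 * modes to __schedule(), replacing the former 'bool preempt' argument.
 */
asmlinkage __visible void __sched schedule(void)
{
        __schedule(SM_NONE);            /* regular, voluntary schedule */
}

static void __sched notrace preempt_schedule_common(void)
{
        __schedule(SM_PREEMPT);         /* involuntary preemption */
}

/* schedule_rtlock(), added below, passes SM_RTLOCK_WAIT. */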
@@ -288,6 +288,9 @@ extern long schedule_timeout_idle(long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
 asmlinkage void preempt_schedule_irq(void);
+#ifdef CONFIG_PREEMPT_RT
+extern void schedule_rtlock(void);
+#endif
 
 extern int __must_check io_schedule_prepare(void);
 extern void io_schedule_finish(int token);
@@ -5829,7 +5829,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
  */
 #define SM_NONE                 0x0
 #define SM_PREEMPT              0x1
-#define SM_MASK_PREEMPT         (~0U)
+#define SM_RTLOCK_WAIT          0x2
+
+#ifndef CONFIG_PREEMPT_RT
+# define SM_MASK_PREEMPT        (~0U)
+#else
+# define SM_MASK_PREEMPT        SM_PREEMPT
+#endif
 
 /*
  * __schedule() is the main scheduler function.
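The mask is what keeps the distinction local to __schedule(): on !PREEMPT_RT it is ~0U, so `sched_mode & SM_MASK_PREEMPT` degenerates to the old boolean behaviour at no cost, while on PREEMPT_RT only SM_PREEMPT is covered, so an SM_RTLOCK_WAIT entry is not treated as a preemption by the mask-guarded paths. A rough sketch of the consuming side, assuming __schedule() keeps its upstream shape (signal handling, locking and the actual task pick are omitted; details may differ):

        /*
         * Sketch of the mode consumption inside __schedule() (abbreviated).
         * RCU is told that any nonzero mode is a preemption, matching the
         * commit message's RCU point, while the dequeue decision below only
         * treats modes covered by SM_MASK_PREEMPT as preemptions; on RT,
         * SM_RTLOCK_WAIT therefore takes the "voluntary block" path.
         */
        rcu_note_context_switch(!!sched_mode);

        prev_state = READ_ONCE(prev->__state);
        if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
                /* Task really blocked (incl. on an RT lock): dequeue it. */
                deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
        }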
@@ -6134,6 +6140,18 @@ void __sched schedule_preempt_disabled(void)
         preempt_disable();
 }
 
+#ifdef CONFIG_PREEMPT_RT
+void __sched notrace schedule_rtlock(void)
+{
+        do {
+                preempt_disable();
+                __schedule(SM_RTLOCK_WAIT);
+                sched_preempt_enable_no_resched();
+        } while (need_resched());
+}
+NOKPROBE_SYMBOL(schedule_rtlock);
+#endif
+
 static void __sched notrace preempt_schedule_common(void)
 {
         do {
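The caller of the new entry point is not part of this patch; later patches in this series add the rtmutex-based spin/rwlock slowpath that uses it. As an illustrative, simplified sketch (names and structure taken from the follow-up patches, so treat the details as approximate), that slowpath blocks via schedule_rtlock() instead of schedule():

/*
 * Illustrative sketch of the intended user (added by later patches in the
 * series, simplified here): the PREEMPT_RT lock slowpath sets
 * TASK_RTLOCK_WAIT and blocks via the new RT-lock scheduling point, so the
 * scheduler can tell this wait apart from a regular block or a preemption.
 */
static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
{
        struct rt_mutex_waiter waiter;

        /* ... enqueue the waiter, set TASK_RTLOCK_WAIT ... */
        for (;;) {
                if (try_to_take_rt_mutex(lock, current, &waiter))
                        break;

                raw_spin_unlock_irq(&lock->wait_lock);

                /* Block "like a spinlock" via the new scheduling point. */
                schedule_rtlock();

                raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(TASK_RTLOCK_WAIT);
        }
        /* ... dequeue the waiter, restore the saved task state ... */
}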