Commit 970c305a authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fix from Thomas Gleixner:
 "A single scheduler fix:

  Prevent the idle task from ever being preempted. That makes sure that
  synchronize_rcu_tasks(), which ignores the idle task, does not pretend
  that no task is stuck in a preempted state. If that happens and idle was
  preempted on an ftrace trampoline the machine crashes due to
  inconsistent state"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/core: Call __schedule() from do_idle() without enabling preemption
parents e7a3d627 8663effb
...@@ -3502,6 +3502,31 @@ asmlinkage __visible void __sched schedule(void) ...@@ -3502,6 +3502,31 @@ asmlinkage __visible void __sched schedule(void)
} }
EXPORT_SYMBOL(schedule); EXPORT_SYMBOL(schedule);
/*
 * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
 * state (have scheduled out non-voluntarily) by making sure that all
 * tasks have either left the run queue or have gone into user space.
 * As idle tasks do not do either, they must not ever be preempted
 * (schedule out non-voluntarily).
 *
 * schedule_idle() is similar to schedule_preempt_disabled() except that it
 * never enables preemption because it does not call sched_submit_work().
 */
void __sched schedule_idle(void)
{
/*
 * As this skips calling sched_submit_work(), which the idle task does
 * regardless because that function is a nop when the task is in a
 * TASK_RUNNING state, make sure this isn't used someplace that the
 * current task can be in any other state. Note, idle is always in the
 * TASK_RUNNING state.
 */
WARN_ON_ONCE(current->state);
/*
 * Loop until nothing else wants the CPU; preemption stays disabled
 * throughout, so the idle task can never be scheduled out
 * involuntarily (the invariant synchronize_rcu_tasks() relies on).
 */
do {
__schedule(false);
} while (need_resched());
}
#ifdef CONFIG_CONTEXT_TRACKING #ifdef CONFIG_CONTEXT_TRACKING
asmlinkage __visible void __sched schedule_user(void) asmlinkage __visible void __sched schedule_user(void)
{ {
......
...@@ -265,7 +265,7 @@ static void do_idle(void) ...@@ -265,7 +265,7 @@ static void do_idle(void)
smp_mb__after_atomic(); smp_mb__after_atomic();
sched_ttwu_pending(); sched_ttwu_pending();
schedule_preempt_disabled(); schedule_idle();
if (unlikely(klp_patch_pending(current))) if (unlikely(klp_patch_pending(current)))
klp_update_patch_state(current); klp_update_patch_state(current);
......
...@@ -1467,6 +1467,8 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq) ...@@ -1467,6 +1467,8 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
} }
#endif #endif
extern void schedule_idle(void);
extern void sysrq_sched_debug_show(void); extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void); extern void sched_init_granularity(void);
extern void update_max_interval(void); extern void update_max_interval(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment