Commit 3077805e authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'sched-urgent-2020-07-25' of...

Merge tag 'sched-urgent-2020-07-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into master

Pull scheduler fixes from Ingo Molnar:
 "Fix a race introduced by the recent loadavg race fix, plus add a debug
  check for a hard to debug case of bogus wakeup function flags"

* tag 'sched-urgent-2020-07-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Warn if garbage is passed to default_wake_function()
  sched: Fix race against ptrace_freeze_trace()
parents 17baa442 062d3f95
...@@ -4119,9 +4119,6 @@ static void __sched notrace __schedule(bool preempt) ...@@ -4119,9 +4119,6 @@ static void __sched notrace __schedule(bool preempt)
local_irq_disable(); local_irq_disable();
rcu_note_context_switch(preempt); rcu_note_context_switch(preempt);
/* See deactivate_task() below. */
prev_state = prev->state;
/* /*
* Make sure that signal_pending_state()->signal_pending() below * Make sure that signal_pending_state()->signal_pending() below
* can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
...@@ -4145,11 +4142,16 @@ static void __sched notrace __schedule(bool preempt) ...@@ -4145,11 +4142,16 @@ static void __sched notrace __schedule(bool preempt)
update_rq_clock(rq); update_rq_clock(rq);
switch_count = &prev->nivcsw; switch_count = &prev->nivcsw;
/* /*
* We must re-load prev->state in case ttwu_remote() changed it * We must load prev->state once (task_struct::state is volatile), such
* before we acquired rq->lock. * that:
*
* - we form a control dependency vs deactivate_task() below.
* - ptrace_{,un}freeze_traced() can change ->state underneath us.
*/ */
if (!preempt && prev_state && prev_state == prev->state) { prev_state = prev->state;
if (!preempt && prev_state) {
if (signal_pending_state(prev_state, prev)) { if (signal_pending_state(prev_state, prev)) {
prev->state = TASK_RUNNING; prev->state = TASK_RUNNING;
} else { } else {
...@@ -4163,10 +4165,12 @@ static void __sched notrace __schedule(bool preempt) ...@@ -4163,10 +4165,12 @@ static void __sched notrace __schedule(bool preempt)
/* /*
* __schedule() ttwu() * __schedule() ttwu()
* prev_state = prev->state; if (READ_ONCE(p->on_rq) && ...) * prev_state = prev->state; if (p->on_rq && ...)
* LOCK rq->lock goto out; * if (prev_state) goto out;
* smp_mb__after_spinlock(); smp_acquire__after_ctrl_dep(); * p->on_rq = 0; smp_acquire__after_ctrl_dep();
* p->on_rq = 0; p->state = TASK_WAKING; * p->state = TASK_WAKING
*
* Where __schedule() and ttwu() have matching control dependencies.
* *
* After this, schedule() must not care about p->state any more. * After this, schedule() must not care about p->state any more.
*/ */
...@@ -4481,6 +4485,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void) ...@@ -4481,6 +4485,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
void *key) void *key)
{ {
WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
return try_to_wake_up(curr->private, mode, wake_flags); return try_to_wake_up(curr->private, mode, wake_flags);
} }
EXPORT_SYMBOL(default_wake_function); EXPORT_SYMBOL(default_wake_function);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment