Commit 71e20f18 authored by Ingo Molnar

sched: affine sync wakeups

make sync wakeups affine for cache-cold tasks: if a cache-cold task
is woken up by a sync wakeup then use the opportunity to migrate it
straight away. (the two tasks are 'related' because they communicate)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent d172fcd3
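
For context, a minimal user-space sketch (not part of this commit) of the workload the change targets: two tasks ping-ponging over a pair of pipes. Each write wakes a peer that is blocked and typically cache-cold; with this change the scheduler may migrate that peer straight onto the waker's CPU.

#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	int to_child[2], to_parent[2];
	char buf[4];

	if (pipe(to_child) < 0 || pipe(to_parent) < 0) {
		perror("pipe");
		return 1;
	}

	if (fork() == 0) {
		/* Child: echo each token back to the parent. */
		for (int i = 0; i < 100000; i++) {
			if (read(to_child[0], buf, 4) != 4)
				break;
			if (write(to_parent[1], buf, 4) != 4)
				break;
		}
		_exit(0);
	}

	/* Parent: each write wakes the blocked child, then blocks on the reply. */
	for (int i = 0; i < 100000; i++) {
		if (write(to_child[1], "ping", 4) != 4)
			break;
		if (read(to_parent[0], buf, 4) != 4)
			break;
	}

	wait(NULL);
	return 0;
}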
@@ -382,7 +382,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
 	/* Signal writers asynchronously that there is more room. */
 	if (do_wakeup) {
-		wake_up_interruptible(&pipe->wait);
+		wake_up_interruptible_sync(&pipe->wait);
 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 	}
 	if (ret > 0)
@@ -555,7 +555,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
 out:
 	mutex_unlock(&inode->i_mutex);
 	if (do_wakeup) {
-		wake_up_interruptible(&pipe->wait);
+		wake_up_interruptible_sync(&pipe->wait);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 	}
 	if (ret > 0)
@@ -649,7 +649,7 @@ pipe_release(struct inode *inode, int decr, int decw)
 	if (!pipe->readers && !pipe->writers) {
 		free_pipe_info(inode);
 	} else {
-		wake_up_interruptible(&pipe->wait);
+		wake_up_interruptible_sync(&pipe->wait);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 	}
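
The three pipe hunks above switch the reader/writer wakeups from wake_up_interruptible() to wake_up_interruptible_sync(). A kernel-style sketch of that distinction, using a hypothetical wait queue my_wq that is not part of this commit:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);	/* hypothetical wait queue */

static void notify_plain(void)
{
	/* Plain wakeup: the woken task may preempt the caller immediately. */
	wake_up_interruptible(&my_wq);
}

static void notify_before_blocking(void)
{
	/*
	 * Sync wakeup: hints that the caller is about to block (as a pipe
	 * reader or writer does right after this call), so try_to_wake_up()
	 * sees sync=1 and can apply the affinity logic added below.
	 */
	wake_up_interruptible_sync(&my_wq);
}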
@@ -1521,6 +1521,12 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 		unsigned long tl = this_load;
 		unsigned long tl_per_task;
 
+		/*
+		 * Attract cache-cold tasks on sync wakeups:
+		 */
+		if (sync && !task_hot(p, rq->clock, this_sd))
+			goto out_set_cpu;
+
 		schedstat_inc(p, se.nr_wakeups_affine_attempts);
 		tl_per_task = cpu_avg_load_per_task(this_cpu);
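
task_hot() itself is not shown in this diff; in kernels of this era it is roughly the helper below, which treats a task as cache-hot if it last ran within sysctl_sched_migration_cost nanoseconds. A sketch for context, not part of the commit:

static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
{
	/* (sd is unused in this simplified sketch.) */
	s64 delta = now - p->se.exec_start;

	/* Cache-hot if the task executed within the migration-cost window. */
	return delta < (s64)sysctl_sched_migration_cost;
}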
@@ -1598,7 +1604,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	 * the waker guarantees that the freshly woken up task is going
 	 * to be considered on this CPU.)
 	 */
-	if (!sync || cpu != this_cpu)
+	if (!sync || rq->curr == rq->idle)
 		check_preempt_curr(rq, p);
 
 	success = 1;
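
Restated as a standalone predicate (illustration only): with the new test, a sync wakeup only runs the preemption check when the waker's CPU is merely running the idle task.

/* Illustration only: mirrors the new 'if (!sync || rq->curr == rq->idle)'. */
static int wakeup_should_preempt(int sync, int curr_is_idle)
{
	/*
	 * A sync waker has promised to release the CPU shortly, so there is
	 * no point preempting it unless only the idle task is running.
	 */
	return !sync || curr_is_idle;
}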
@@ -334,7 +334,7 @@ static void unix_write_space(struct sock *sk)
 	read_lock(&sk->sk_callback_lock);
 	if (unix_writable(sk)) {
 		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-			wake_up_interruptible(sk->sk_sleep);
+			wake_up_interruptible_sync(sk->sk_sleep);
 		sk_wake_async(sk, 2, POLL_OUT);
 	}
 	read_unlock(&sk->sk_callback_lock);
@@ -1639,7 +1639,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (!skb)
 		goto out_unlock;
 
-	wake_up_interruptible(&u->peer_wait);
+	wake_up_interruptible_sync(&u->peer_wait);
 
 	if (msg->msg_name)
 		unix_copy_addr(msg, skb->sk);
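
The af_unix hunks apply the same conversion to UNIX-socket wakeups. A user-space sketch of the corresponding communication pattern, using a SOCK_DGRAM socketpair (illustrative, not part of the commit):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/wait.h>

int main(void)
{
	int sv[2];
	char buf[4];

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) < 0) {
		perror("socketpair");
		return 1;
	}

	if (fork() == 0) {
		/* Child: block in recv(), echo each datagram back. */
		for (int i = 0; i < 100000; i++) {
			if (recv(sv[1], buf, 4, 0) != 4)
				break;
			if (send(sv[1], buf, 4, 0) != 4)
				break;
		}
		_exit(0);
	}

	/* Parent: send a datagram, then block waiting for the echo. */
	for (int i = 0; i < 100000; i++) {
		if (send(sv[0], "ping", 4, 0) != 4)
			break;
		if (recv(sv[0], buf, 4, 0) != 4)
			break;
	}

	wait(NULL);
	return 0;
}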