Commit 7a57f32a authored by Peter Zijlstra, committed by Ingo Molnar

sched/core: Avoid obvious double update_rq_clock()

Add DEQUEUE_NOCLOCK (and ENQUEUE_NOCLOCK) to all call sites that have
just done an update_rq_clock(), so the queue operation does not update
the clock a second time.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent bce4dc80
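
Every hunk below follows the same pattern: the caller has already done
update_rq_clock(rq) under rq->lock, so the subsequent dequeue/enqueue is
told to skip its own clock update via a *_NOCLOCK flag. As a minimal
user-space sketch of that pattern (illustrative only, not the kernel
implementation; the flag values here are hypothetical stand-ins):

#include <stdio.h>

/* Hypothetical flag values for illustration; the kernel defines its own. */
#define DEQUEUE_SAVE    0x02
#define DEQUEUE_NOCLOCK 0x08    /* caller already updated the clock */

struct rq { unsigned long long clock; };

static void update_rq_clock(struct rq *rq)
{
        rq->clock++;    /* stand-in for reading the real clock */
}

static void dequeue_task(struct rq *rq, int flags)
{
        /* Skip the clock update when the caller has just done one. */
        if (!(flags & DEQUEUE_NOCLOCK))
                update_rq_clock(rq);
        /* ... actual dequeue work would go here ... */
}

int main(void)
{
        struct rq rq = { 0 };

        update_rq_clock(&rq);   /* clock updated exactly once */
        dequeue_task(&rq, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
        printf("clock = %llu\n", rq.clock);     /* prints 1, not 2 */
        return 0;
}

Passing the NOCLOCK flag only at call sites that already hold rq->lock
and have just updated the clock keeps the clock fresh without paying
for a redundant second update.
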
@@ -1062,7 +1062,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 		 * holding rq->lock.
 		 */
 		lockdep_assert_held(&rq->lock);
-		dequeue_task(rq, p, DEQUEUE_SAVE);
+		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
 	}
 	if (running)
 		put_prev_task(rq, p);
@@ -2555,7 +2555,7 @@ void wake_up_new_task(struct task_struct *p)
 	update_rq_clock(rq);
 	post_init_entity_util_avg(&p->se);
 
-	activate_task(rq, p, 0);
+	activate_task(rq, p, ENQUEUE_NOCLOCK);
 	p->on_rq = TASK_ON_RQ_QUEUED;
 	trace_sched_wakeup_new(p);
 	check_preempt_curr(rq, p, WF_FORK);
@@ -3683,7 +3683,8 @@ EXPORT_SYMBOL(default_wake_function);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-	int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE;
+	int oldprio, queued, running, queue_flag =
+		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
 	const struct sched_class *prev_class;
 	struct rq_flags rf;
 	struct rq *rq;
@@ -3804,7 +3805,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
 	if (queued)
-		dequeue_task(rq, p, DEQUEUE_SAVE);
+		dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK);
 	if (running)
 		put_prev_task(rq, p);
 
@@ -4125,7 +4126,7 @@ static int __sched_setscheduler(struct task_struct *p,
 	const struct sched_class *prev_class;
 	struct rq_flags rf;
 	int reset_on_fork;
-	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE;
+	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
 	struct rq *rq;
 
 	/* May grab non-irq protected spin_locks: */
@@ -6413,7 +6414,8 @@ static void sched_change_group(struct task_struct *tsk, int type)
  */
 void sched_move_task(struct task_struct *tsk)
 {
-	int queued, running;
+	int queued, running, queue_flags =
+		DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
 	struct rq_flags rf;
 	struct rq *rq;
 
@@ -6424,14 +6426,14 @@ void sched_move_task(struct task_struct *tsk)
 	queued = task_on_rq_queued(tsk);
 
 	if (queued)
-		dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE);
+		dequeue_task(rq, tsk, queue_flags);
 	if (running)
 		put_prev_task(rq, tsk);
 
 	sched_change_group(tsk, TASK_MOVE_GROUP);
 
 	if (queued)
-		enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE | ENQUEUE_NOCLOCK);
+		enqueue_task(rq, tsk, queue_flags);
 	if (running)
 		set_curr_task(rq, tsk);
 