Commit dbc7f069 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Thomas Gleixner

sched: Replace normalize_task() with __sched_setscheduler()

Reduce duplicate logic; normalize_task() is a simplified version of
__sched_setscheduler(). Parametrize the difference and collapse.

This reduces the amount of check_class_changed() sites.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124742.532642391@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent e3fca9e7
...@@ -3438,7 +3438,7 @@ static bool dl_param_changed(struct task_struct *p, ...@@ -3438,7 +3438,7 @@ static bool dl_param_changed(struct task_struct *p,
static int __sched_setscheduler(struct task_struct *p, static int __sched_setscheduler(struct task_struct *p,
const struct sched_attr *attr, const struct sched_attr *attr,
bool user) bool user, bool pi)
{ {
int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
MAX_RT_PRIO - 1 - attr->sched_priority; MAX_RT_PRIO - 1 - attr->sched_priority;
...@@ -3624,18 +3624,20 @@ static int __sched_setscheduler(struct task_struct *p, ...@@ -3624,18 +3624,20 @@ static int __sched_setscheduler(struct task_struct *p,
p->sched_reset_on_fork = reset_on_fork; p->sched_reset_on_fork = reset_on_fork;
oldprio = p->prio; oldprio = p->prio;
/* if (pi) {
* Take priority boosted tasks into account. If the new /*
* effective priority is unchanged, we just store the new * Take priority boosted tasks into account. If the new
* normal parameters and do not touch the scheduler class and * effective priority is unchanged, we just store the new
* the runqueue. This will be done when the task deboost * normal parameters and do not touch the scheduler class and
* itself. * the runqueue. This will be done when the task deboost
*/ * itself.
new_effective_prio = rt_mutex_get_effective_prio(p, newprio); */
if (new_effective_prio == oldprio) { new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
__setscheduler_params(p, attr); if (new_effective_prio == oldprio) {
task_rq_unlock(rq, p, &flags); __setscheduler_params(p, attr);
return 0; task_rq_unlock(rq, p, &flags);
return 0;
}
} }
queued = task_on_rq_queued(p); queued = task_on_rq_queued(p);
...@@ -3646,7 +3648,7 @@ static int __sched_setscheduler(struct task_struct *p, ...@@ -3646,7 +3648,7 @@ static int __sched_setscheduler(struct task_struct *p,
put_prev_task(rq, p); put_prev_task(rq, p);
prev_class = p->sched_class; prev_class = p->sched_class;
__setscheduler(rq, p, attr, true); __setscheduler(rq, p, attr, pi);
if (running) if (running)
p->sched_class->set_curr_task(rq); p->sched_class->set_curr_task(rq);
...@@ -3661,7 +3663,8 @@ static int __sched_setscheduler(struct task_struct *p, ...@@ -3661,7 +3663,8 @@ static int __sched_setscheduler(struct task_struct *p,
check_class_changed(rq, p, prev_class, oldprio); check_class_changed(rq, p, prev_class, oldprio);
task_rq_unlock(rq, p, &flags); task_rq_unlock(rq, p, &flags);
rt_mutex_adjust_pi(p); if (pi)
rt_mutex_adjust_pi(p);
return 0; return 0;
} }
...@@ -3682,7 +3685,7 @@ static int _sched_setscheduler(struct task_struct *p, int policy, ...@@ -3682,7 +3685,7 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
attr.sched_policy = policy; attr.sched_policy = policy;
} }
return __sched_setscheduler(p, &attr, check); return __sched_setscheduler(p, &attr, check, true);
} }
/** /**
* sched_setscheduler - change the scheduling policy and/or RT priority of a thread. * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
...@@ -3703,7 +3706,7 @@ EXPORT_SYMBOL_GPL(sched_setscheduler); ...@@ -3703,7 +3706,7 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
int sched_setattr(struct task_struct *p, const struct sched_attr *attr) int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{ {
return __sched_setscheduler(p, attr, true); return __sched_setscheduler(p, attr, true, true);
} }
EXPORT_SYMBOL_GPL(sched_setattr); EXPORT_SYMBOL_GPL(sched_setattr);
...@@ -7361,32 +7364,12 @@ EXPORT_SYMBOL(___might_sleep); ...@@ -7361,32 +7364,12 @@ EXPORT_SYMBOL(___might_sleep);
#endif #endif
#ifdef CONFIG_MAGIC_SYSRQ #ifdef CONFIG_MAGIC_SYSRQ
static void normalize_task(struct rq *rq, struct task_struct *p) void normalize_rt_tasks(void)
{ {
const struct sched_class *prev_class = p->sched_class; struct task_struct *g, *p;
struct sched_attr attr = { struct sched_attr attr = {
.sched_policy = SCHED_NORMAL, .sched_policy = SCHED_NORMAL,
}; };
int old_prio = p->prio;
int queued;
queued = task_on_rq_queued(p);
if (queued)
dequeue_task(rq, p, 0);
__setscheduler(rq, p, &attr, false);
if (queued) {
enqueue_task(rq, p, 0);
resched_curr(rq);
}
check_class_changed(rq, p, prev_class, old_prio);
}
void normalize_rt_tasks(void)
{
struct task_struct *g, *p;
unsigned long flags;
struct rq *rq;
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
for_each_process_thread(g, p) { for_each_process_thread(g, p) {
...@@ -7413,9 +7396,7 @@ void normalize_rt_tasks(void) ...@@ -7413,9 +7396,7 @@ void normalize_rt_tasks(void)
continue; continue;
} }
rq = task_rq_lock(p, &flags); __sched_setscheduler(p, &attr, false, false);
normalize_task(rq, p);
task_rq_unlock(rq, p, &flags);
} }
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment