Commit e4099a5e authored by Peter Zijlstra, committed by Ingo Molnar

sched/deadline: Fix up the smp-affinity mask tests

For now, deadline tasks are not allowed to set SMP affinity; however, the
current tests are wrong, so cure this.

The test in __sched_setscheduler() also uses an on-stack cpumask_t,
which is a no-no.

Change both tests to use cpumask_subset(), so that we check that the root
domain span is a subset of the cpus_allowed mask. This way we are sure the
tasks can always run on all CPUs they can be balanced over, and have no
effective affinity constraints.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-fyqtb1lapxca3lhsxv9cumdc@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6bfd6d72
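
The new test is plain set containment: a task may become (or stay) SCHED_DEADLINE only if the root-domain span is a subset of its cpus_allowed mask, i.e. the task can run on every CPU the deadline scheduler may balance it over. Below is a minimal userspace sketch of that subset check, using the glibc cpu_set_t macros rather than the kernel cpumask API; the span_is_subset() helper and the example masks are illustrative only and are not part of the patch.

/* Userspace illustration only -- mirrors cpumask_subset(span, &p->cpus_allowed). */
#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper: true iff every CPU set in @span is also set in @allowed. */
static bool span_is_subset(const cpu_set_t *span, const cpu_set_t *allowed)
{
        cpu_set_t tmp;

        CPU_AND(&tmp, span, allowed);   /* tmp = span & allowed */
        return CPU_EQUAL(&tmp, span);   /* subset <=> intersection == span */
}

int main(void)
{
        cpu_set_t span, allowed;
        int cpu;

        CPU_ZERO(&span);
        CPU_ZERO(&allowed);

        /* Pretend the root domain spans CPUs 0-3. */
        for (cpu = 0; cpu < 4; cpu++)
                CPU_SET(cpu, &span);

        /* Task allowed on CPUs 0-7: span is covered, admission would pass. */
        for (cpu = 0; cpu < 8; cpu++)
                CPU_SET(cpu, &allowed);
        printf("allowed 0-7: subset = %d\n", span_is_subset(&span, &allowed));

        /* Restrict affinity to CPUs 0-1: span is no longer covered, so reject. */
        CPU_ZERO(&allowed);
        CPU_SET(0, &allowed);
        CPU_SET(1, &allowed);
        printf("allowed 0-1: subset = %d\n", span_is_subset(&span, &allowed));

        return 0;
}

In the kernel, the same containment test is a single cpumask_subset() call, which is why the patch can drop the on-stack act_affinity mask and the cpumask_and() against cpu_active_mask.
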
kernel/sched/core.c

@@ -3384,23 +3384,14 @@ static int __sched_setscheduler(struct task_struct *p,
 #ifdef CONFIG_SMP
         if (dl_bandwidth_enabled() && dl_policy(policy)) {
                 cpumask_t *span = rq->rd->span;
-                cpumask_t act_affinity;
-
-                /*
-                 * cpus_allowed mask is statically initialized with
-                 * CPU_MASK_ALL, span is instead dynamic. Here we
-                 * compute the "dynamic" affinity of a task.
-                 */
-                cpumask_and(&act_affinity, &p->cpus_allowed,
-                            cpu_active_mask);
 
                 /*
                  * Don't allow tasks with an affinity mask smaller than
                  * the entire root_domain to become SCHED_DEADLINE. We
                  * will also fail if there's no bandwidth available.
                  */
-                if (!cpumask_equal(&act_affinity, span) ||
+                if (!cpumask_subset(span, &p->cpus_allowed) ||
                     rq->rd->dl_bw.bw == 0) {
                         task_rq_unlock(rq, p, &flags);
                         return -EPERM;
                 }
@@ -3420,8 +3411,7 @@ static int __sched_setscheduler(struct task_struct *p,
          * of a SCHED_DEADLINE task) we need to check if enough bandwidth
          * is available.
          */
-        if ((dl_policy(policy) || dl_task(p)) &&
-            dl_overflow(p, policy, attr)) {
+        if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) {
                 task_rq_unlock(rq, p, &flags);
                 return -EBUSY;
         }
@@ -3860,6 +3850,10 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
         if (retval)
                 goto out_unlock;
 
+
+        cpuset_cpus_allowed(p, cpus_allowed);
+        cpumask_and(new_mask, in_mask, cpus_allowed);
+
         /*
          * Since bandwidth control happens on root_domain basis,
          * if admission test is enabled, we only admit -deadline
@@ -3870,16 +3864,12 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
         if (task_has_dl_policy(p)) {
                 const struct cpumask *span = task_rq(p)->rd->span;
 
-                if (dl_bandwidth_enabled() &&
-                    !cpumask_equal(in_mask, span)) {
+                if (dl_bandwidth_enabled() && !cpumask_subset(span, new_mask)) {
                         retval = -EBUSY;
                         goto out_unlock;
                 }
         }
 #endif
-
-        cpuset_cpus_allowed(p, cpus_allowed);
-        cpumask_and(new_mask, in_mask, cpus_allowed);
 again:
         retval = set_cpus_allowed_ptr(p, new_mask);
 
@@ -4535,7 +4525,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
  * When dealing with a -deadline task, we have to check if moving it to
  * a new CPU is possible or not. In fact, this is only true iff there
  * is enough bandwidth available on such CPU, otherwise we want the
- * whole migration progedure to fail over.
+ * whole migration procedure to fail over.
  */
 static inline
 bool set_task_cpu_dl(struct task_struct *p, unsigned int cpu)