Commit 24600ce8 authored by Rusty Russell, committed by Ingo Molnar

sched: convert check_preempt_equal_prio to cpumask_var_t.

Impact: stack reduction for large NR_CPUS

Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves
stack space.

We simply return if the allocation fails: since we don't use it we
could just pass NULL to cpupri_find and have it handle that.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 68e74568
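
For reference, the cpumask_var_t API this patch switches to follows a fixed allocate/use/free pattern. The sketch below is illustrative only (the function name example_path() and the cpumask_copy() call are placeholders, not part of this patch): when CONFIG_CPUMASK_OFFSTACK is unset, cpumask_var_t is an ordinary on-stack array and alloc_cpumask_var()/free_cpumask_var() cost essentially nothing, while with it set the mask is allocated dynamically. GFP_ATOMIC is used because this scheduler path must not sleep.

    #include <linux/cpumask.h>
    #include <linux/gfp.h>

    /* Illustrative sketch of the cpumask_var_t pattern; not part of the patch. */
    static void example_path(void)
    {
            cpumask_var_t mask;     /* pointer if CONFIG_CPUMASK_OFFSTACK, array otherwise */

            /* Atomic allocation: this kind of path runs with locks held and cannot sleep. */
            if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
                    return;         /* bail out on allocation failure, as the patch does */

            cpumask_copy(mask, cpu_online_mask);    /* use the mask like any cpumask_t */

            free_cpumask_var(mask); /* pair every successful allocation with a free */
    }
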
@@ -805,17 +805,20 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
-	cpumask_t mask;
+	cpumask_var_t mask;
 
 	if (rq->curr->rt.nr_cpus_allowed == 1)
 		return;
 
-	if (p->rt.nr_cpus_allowed != 1
-	    && cpupri_find(&rq->rd->cpupri, p, &mask))
+	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
 		return;
 
-	if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
-		return;
+	if (p->rt.nr_cpus_allowed != 1
+	    && cpupri_find(&rq->rd->cpupri, p, mask))
+		goto free;
+
+	if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
+		goto free;
 
 	/*
 	 * There appears to be other cpus that can accept
@@ -824,6 +827,8 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 	 */
 	requeue_task_rt(rq, p, 1);
 	resched_task(rq->curr);
+free:
+	free_cpumask_var(mask);
 }
 
 #endif /* CONFIG_SMP */
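
A note on the resulting control flow: once alloc_cpumask_var() has succeeded, both cpupri_find() checks jump to the free: label instead of returning directly, so the dynamically allocated mask is released on every exit path.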