Commit 5a16f3d3 authored by Rusty Russell's avatar Rusty Russell Committed by Ingo Molnar

sched: convert struct (sys_)sched_setaffinity() to cpumask_var_t.

Impact: stack usage reduction

Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves
space on the stack.  cpumask_var_t is just a struct cpumask for
!CONFIG_CPUMASK_OFFSTACK.

Note the removal of the initializer of new_mask: since the first thing
we did was "cpus_and(new_mask, new_mask, cpus_allowed)" I just changed
that to "cpumask_and(new_mask, in_mask, cpus_allowed);".
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent e76bd8d9
@@ -5378,8 +5378,7 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
 long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
 {
-	cpumask_t cpus_allowed;
-	cpumask_t new_mask = *in_mask;
+	cpumask_var_t cpus_allowed, new_mask;
 	struct task_struct *p;
 	int retval;
@@ -5401,6 +5400,14 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
 	get_task_struct(p);
 	read_unlock(&tasklist_lock);
 
+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto out_put_task;
+	}
+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto out_free_cpus_allowed;
+	}
 	retval = -EPERM;
 	if ((current->euid != p->euid) && (current->euid != p->uid) &&
 			!capable(CAP_SYS_NICE))
@@ -5410,24 +5417,28 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
 	if (retval)
 		goto out_unlock;
 
-	cpuset_cpus_allowed(p, &cpus_allowed);
-	cpus_and(new_mask, new_mask, cpus_allowed);
+	cpuset_cpus_allowed(p, cpus_allowed);
+	cpumask_and(new_mask, in_mask, cpus_allowed);
  again:
-	retval = set_cpus_allowed_ptr(p, &new_mask);
+	retval = set_cpus_allowed_ptr(p, new_mask);
 
 	if (!retval) {
-		cpuset_cpus_allowed(p, &cpus_allowed);
-		if (!cpus_subset(new_mask, cpus_allowed)) {
+		cpuset_cpus_allowed(p, cpus_allowed);
+		if (!cpumask_subset(new_mask, cpus_allowed)) {
 			/*
 			 * We must have raced with a concurrent cpuset
 			 * update. Just reset the cpus_allowed to the
 			 * cpuset's cpus_allowed
 			 */
-			new_mask = cpus_allowed;
+			cpumask_copy(new_mask, cpus_allowed);
 			goto again;
 		}
 	}
 out_unlock:
+	free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+	free_cpumask_var(cpus_allowed);
+out_put_task:
 	put_task_struct(p);
 	put_online_cpus();
 	return retval;
@@ -5453,14 +5464,17 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
 				      unsigned long __user *user_mask_ptr)
 {
-	cpumask_t new_mask;
+	cpumask_var_t new_mask;
 	int retval;
 
-	retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
-	if (retval)
-		return retval;
+	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+		return -ENOMEM;
 
-	return sched_setaffinity(pid, &new_mask);
+	retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
+	if (retval == 0)
+		retval = sched_setaffinity(pid, new_mask);
+	free_cpumask_var(new_mask);
+	return retval;
 }
 
 long sched_getaffinity(pid_t pid, cpumask_t *mask)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment