Commit 7cd3f199 authored by Rusty Russell, committed by Linus Torvalds

[PATCH] More care in sys_setaffinity

We currently mask off offline CPUs in both set_cpus_allowed and
sys_sched_setaffinity.  This is firstly redundant, and secondly
erroneous when more CPUs come online (e.g. setting affinity to all 1s
should mean all CPUs, including future ones).

We mask with cpu_online_map in sys_sched_getaffinity *anyway* (which
is another issue, since that mask is not valid once the set of online
CPUs changes either), so userspace won't see any difference.

This patch makes set_cpus_allowed() return -errno on failure, and makes
sys_sched_setaffinity check that return value.
parent 923c572e
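For illustration, the user-visible behaviour described above could be exercised with a small test program. This is only a sketch: it uses today's glibc cpu_set_t wrappers rather than the raw unsigned-long mask interface this patch touches, and the exact kernel behaviour for an all-offline mask depends on the kernel version.

/* Sketch only: assumes the modern glibc sched_setaffinity()/sched_getaffinity()
 * cpu_set_t wrappers, not the raw unsigned-long interface of this patch. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t all, seen;
        int cpu;

        /* Request "every CPU", including ones that may come online later.
         * After this patch the kernel keeps the full mask rather than
         * clipping it to cpu_online_map at set time. */
        CPU_ZERO(&all);
        for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
                CPU_SET(cpu, &all);
        if (sched_setaffinity(0, sizeof(all), &all) != 0)
                perror("sched_setaffinity");

        /* sys_sched_getaffinity still masks with cpu_online_map, so the
         * mask reported back to userspace looks the same as before. */
        if (sched_getaffinity(0, sizeof(seen), &seen) != 0)
                perror("sched_getaffinity");
        printf("online CPUs reported in mask: %d\n", CPU_COUNT(&seen));
        return 0;
}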
@@ -103,7 +103,7 @@ void set_user_nice(task_t *p, long nice)
   Sets the "nice" value of task p to the given value.
 int setscheduler(pid_t pid, int policy, struct sched_param *param)
   Sets the scheduling policy and parameters for the given pid.
-void set_cpus_allowed(task_t *p, unsigned long new_mask)
+int set_cpus_allowed(task_t *p, unsigned long new_mask)
   Sets a given task's CPU affinity and migrates it to a proper cpu.
   Callers must have a valid reference to the task and assure the
   task not exit prematurely. No locks can be held during the call.
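For illustration, an in-kernel caller following the contract documented above (hold a task reference, hold no locks, and now check the return value) might look like the sketch below; pin_to_cpu() is hypothetical and not part of this patch.

/* Hypothetical caller, not part of this patch: shows the new contract of
 * set_cpus_allowed() -- reference held, no locks held, -errno checked. */
static int pin_to_cpu(task_t *p, int cpu)
{
        int retval;

        get_task_struct(p);             /* keep the task from exiting */
        retval = set_cpus_allowed(p, 1UL << cpu);
        put_task_struct(p);

        return retval;                  /* 0, or -EINVAL if cpu is offline */
}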
@@ -483,9 +483,12 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
 #define PF_LESS_THROTTLE 0x01000000 /* Throttle me less: I clena memory */
 
 #ifdef CONFIG_SMP
-extern void set_cpus_allowed(task_t *p, unsigned long new_mask);
+extern int set_cpus_allowed(task_t *p, unsigned long new_mask);
 #else
-# define set_cpus_allowed(p, new_mask) do { } while (0)
+static inline int set_cpus_allowed(task_t *p, unsigned long new_mask)
+{
+        return 0;
+}
 #endif
 
 #ifdef CONFIG_NUMA
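The !CONFIG_SMP change above replaces the old do { } while (0) macro with a static inline that returns 0, so code written against the new int-returning API also builds on uniprocessor kernels. A sketch of such a caller follows; check_affinity() is illustrative, not part of this patch.

/* With the old UP macro, assigning the result of set_cpus_allowed() would
 * not compile; with the static inline stub it simply returns 0. */
static int check_affinity(task_t *p, unsigned long new_mask)
{
        int retval = set_cpus_allowed(p, new_mask);

        if (retval)
                return retval;  /* SMP: e.g. -EINVAL, no requested CPU online */
        return 0;               /* UP stub always succeeds */
}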
@@ -1880,10 +1880,6 @@ asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
         if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
                 return -EFAULT;
 
-        new_mask &= cpu_online_map;
-        if (!new_mask)
-                return -EINVAL;
-
         read_lock(&tasklist_lock);
 
         p = find_process_by_pid(pid);
@@ -1905,8 +1901,7 @@ asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
                         !capable(CAP_SYS_NICE))
                 goto out_unlock;
 
-        retval = 0;
-        set_cpus_allowed(p, new_mask);
+        retval = set_cpus_allowed(p, new_mask);
 
 out_unlock:
         put_task_struct(p);
@@ -2269,17 +2264,14 @@ typedef struct {
  * task must not exit() & deallocate itself prematurely. The
  * call is not atomic; no spinlocks may be held.
  */
-void set_cpus_allowed(task_t *p, unsigned long new_mask)
+int set_cpus_allowed(task_t *p, unsigned long new_mask)
 {
         unsigned long flags;
         migration_req_t req;
         runqueue_t *rq;
 
-#if 0 /* FIXME: Grab cpu_lock, return error on this case. --RR */
-        new_mask &= cpu_online_map;
-        if (!new_mask)
-                BUG();
-#endif
+        if (any_online_cpu(new_mask) == NR_CPUS)
+                return -EINVAL;
 
         rq = task_rq_lock(p, &flags);
         p->cpus_allowed = new_mask;
@@ -2289,7 +2281,7 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
          */
         if (new_mask & (1UL << task_cpu(p))) {
                 task_rq_unlock(rq, &flags);
-                return;
+                return 0;
         }
         /*
          * If the task is not on a runqueue (and not running), then
@@ -2298,7 +2290,7 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
         if (!p->array && !task_running(rq, p)) {
                 set_task_cpu(p, any_online_cpu(p->cpus_allowed));
                 task_rq_unlock(rq, &flags);
-                return;
+                return 0;
         }
         init_completion(&req.done);
         req.task = p;
@@ -2308,6 +2300,7 @@ void set_cpus_allowed(task_t *p, unsigned long new_mask)
         wake_up_process(rq->migration_thread);
         wait_for_completion(&req.done);
+        return 0;
 }
 
 /* Move (not current) task off this cpu, onto dest cpu. */