Commit 0b2f630a authored by Miao Xie, committed by Linus Torvalds

cpusets: restructure the function update_cpumask() and update_nodemask()

Extract two functions from update_cpumask() and update_nodemask(). They
will be used later for updating tasks' cpus_allowed and mems_allowed after
CPU/NODE offline/online.
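For illustration only (not part of this patch), a rough sketch of how a
hotplug handler might reuse the extracted helper once CPUs go offline.
The handler name cpuset_handle_cpuhp() and the trimming/locking shown are
assumptions; only update_tasks_cpumask() is introduced here:

	/*
	 * Hypothetical sketch: after a CPU goes offline, trim a cpuset's
	 * cpus_allowed to the still-online CPUs, then rebind every task
	 * in the cpuset via the extracted helper.  cgroup_mutex is
	 * assumed held, as update_tasks_cpumask() requires.
	 */
	static void cpuset_handle_cpuhp(struct cpuset *cs)
	{
		mutex_lock(&callback_mutex);
		cpus_and(cs->cpus_allowed, cs->cpus_allowed, cpu_online_map);
		mutex_unlock(&callback_mutex);

		/*
		 * Update each task's cpus_allowed; a nonzero return here
		 * essentially means heap_init() failed with -ENOMEM.
		 */
		if (update_tasks_cpumask(cs))
			printk(KERN_WARNING "cpuset: updating tasks' cpumask failed\n");
	}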

[lizf@cn.fujitsu.com: build fix]
Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Acked-by: Paul Jackson <pj@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 628f4235
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -763,6 +763,37 @@ static void cpuset_change_cpumask(struct task_struct *tsk,
 	set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed));
 }
 
+/**
+ * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
+ * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
+ *
+ * Called with cgroup_mutex held
+ *
+ * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
+ * calling callback functions for each.
+ *
+ * Return 0 if successful, -errno if not.
+ */
+static int update_tasks_cpumask(struct cpuset *cs)
+{
+	struct cgroup_scanner scan;
+	struct ptr_heap heap;
+	int retval;
+
+	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, &started_after);
+	if (retval)
+		return retval;
+
+	scan.cg = cs->css.cgroup;
+	scan.test_task = cpuset_test_cpumask;
+	scan.process_task = cpuset_change_cpumask;
+	scan.heap = &heap;
+	retval = cgroup_scan_tasks(&scan);
+	heap_free(&heap);
+	return retval;
+}
+
 /**
  * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
  * @cs: the cpuset to consider
@@ -771,8 +802,6 @@ static void cpuset_change_cpumask(struct task_struct *tsk,
 static int update_cpumask(struct cpuset *cs, const char *buf)
 {
 	struct cpuset trialcs;
-	struct cgroup_scanner scan;
-	struct ptr_heap heap;
 	int retval;
 	int is_load_balanced;
 
@@ -806,10 +835,6 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
 	if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed))
 		return 0;
 
-	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, &started_after);
-	if (retval)
-		return retval;
-
 	is_load_balanced = is_sched_load_balance(&trialcs);
 
 	mutex_lock(&callback_mutex);
@@ -820,12 +845,9 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
 	 * Scan tasks in the cpuset, and update the cpumasks of any
 	 * that need an update.
 	 */
-	scan.cg = cs->css.cgroup;
-	scan.test_task = cpuset_test_cpumask;
-	scan.process_task = cpuset_change_cpumask;
-	scan.heap = &heap;
-	cgroup_scan_tasks(&scan);
-	heap_free(&heap);
+	retval = update_tasks_cpumask(cs);
+	if (retval < 0)
+		return retval;
 
 	if (is_load_balanced)
 		rebuild_sched_domains();
@@ -881,73 +903,25 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 	mutex_unlock(&callback_mutex);
 }
 
-/*
- * Handle user request to change the 'mems' memory placement
- * of a cpuset.  Needs to validate the request, update the
- * cpusets mems_allowed and mems_generation, and for each
- * task in the cpuset, rebind any vma mempolicies and if
- * the cpuset is marked 'memory_migrate', migrate the tasks
- * pages to the new memory.
- *
- * Call with cgroup_mutex held.  May take callback_mutex during call.
- * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
- * lock each such tasks mm->mmap_sem, scan its vma's and rebind
- * their mempolicies to the cpusets new mems_allowed.
- */
-
 static void *cpuset_being_rebound;
 
-static int update_nodemask(struct cpuset *cs, const char *buf)
+/**
+ * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
+ * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
+ * @oldmem: old mems_allowed of cpuset cs
+ *
+ * Called with cgroup_mutex held
+ * Return 0 if successful, -errno if not.
+ */
+static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem)
 {
-	struct cpuset trialcs;
-	nodemask_t oldmem;
 	struct task_struct *p;
 	struct mm_struct **mmarray;
 	int i, n, ntasks;
 	int migrate;
 	int fudge;
-	int retval;
 	struct cgroup_iter it;
+	int retval;
 
-	/*
-	 * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
-	 * it's read-only
-	 */
-	if (cs == &top_cpuset)
-		return -EACCES;
-
-	trialcs = *cs;
-
-	/*
-	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
-	 * Since nodelist_parse() fails on an empty mask, we special case
-	 * that parsing.  The validate_change() call ensures that cpusets
-	 * with tasks have memory.
-	 */
-	if (!*buf) {
-		nodes_clear(trialcs.mems_allowed);
-	} else {
-		retval = nodelist_parse(buf, trialcs.mems_allowed);
-		if (retval < 0)
-			goto done;
-
-		if (!nodes_subset(trialcs.mems_allowed,
-				node_states[N_HIGH_MEMORY]))
-			return -EINVAL;
-	}
-	oldmem = cs->mems_allowed;
-	if (nodes_equal(oldmem, trialcs.mems_allowed)) {
-		retval = 0;		/* Too easy - nothing to do */
-		goto done;
-	}
-	retval = validate_change(cs, &trialcs);
-	if (retval < 0)
-		goto done;
-
-	mutex_lock(&callback_mutex);
-	cs->mems_allowed = trialcs.mems_allowed;
-	cs->mems_generation = cpuset_mems_generation++;
-	mutex_unlock(&callback_mutex);
-
 	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
@@ -1014,7 +988,7 @@ static int update_nodemask(struct cpuset *cs, const char *buf)
 		mpol_rebind_mm(mm, &cs->mems_allowed);
 		if (migrate)
-			cpuset_migrate_mm(mm, &oldmem, &cs->mems_allowed);
+			cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
 		mmput(mm);
 	}
@@ -1026,6 +1000,70 @@ static int update_nodemask(struct cpuset *cs, const char *buf)
 	return retval;
 }
 
+/*
+ * Handle user request to change the 'mems' memory placement
+ * of a cpuset.  Needs to validate the request, update the
+ * cpusets mems_allowed and mems_generation, and for each
+ * task in the cpuset, rebind any vma mempolicies and if
+ * the cpuset is marked 'memory_migrate', migrate the tasks
+ * pages to the new memory.
+ *
+ * Call with cgroup_mutex held.  May take callback_mutex during call.
+ * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
+ * lock each such tasks mm->mmap_sem, scan its vma's and rebind
+ * their mempolicies to the cpusets new mems_allowed.
+ */
+static int update_nodemask(struct cpuset *cs, const char *buf)
+{
+	struct cpuset trialcs;
+	nodemask_t oldmem;
+	int retval;
+
+	/*
+	 * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
+	 * it's read-only
+	 */
+	if (cs == &top_cpuset)
+		return -EACCES;
+
+	trialcs = *cs;
+
+	/*
+	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
+	 * Since nodelist_parse() fails on an empty mask, we special case
+	 * that parsing.  The validate_change() call ensures that cpusets
+	 * with tasks have memory.
+	 */
+	if (!*buf) {
+		nodes_clear(trialcs.mems_allowed);
+	} else {
+		retval = nodelist_parse(buf, trialcs.mems_allowed);
+		if (retval < 0)
+			goto done;
+
+		if (!nodes_subset(trialcs.mems_allowed,
+				node_states[N_HIGH_MEMORY]))
+			return -EINVAL;
+	}
+	oldmem = cs->mems_allowed;
+	if (nodes_equal(oldmem, trialcs.mems_allowed)) {
+		retval = 0;		/* Too easy - nothing to do */
+		goto done;
+	}
+	retval = validate_change(cs, &trialcs);
+	if (retval < 0)
+		goto done;
+
+	mutex_lock(&callback_mutex);
+	cs->mems_allowed = trialcs.mems_allowed;
+	cs->mems_generation = cpuset_mems_generation++;
+	mutex_unlock(&callback_mutex);
+
+	retval = update_tasks_nodemask(cs, &oldmem);
+done:
+	return retval;
+}
+
 int current_cpuset_is_being_rebound(void)
 {
 	return task_cs(current) == cpuset_being_rebound;
@@ -1935,7 +1973,6 @@ void __init cpuset_init_smp(void)
 }
 
-
 /**
  * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
  * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 * @pmask: pointer to cpumask_t variable to receive cpus_allowed set.
...