Commit bbe373f2 authored by David Rientjes's avatar David Rientjes Committed by Linus Torvalds

oom: compare cpuset mems_allowed instead of exclusive ancestors

Instead of testing for overlap in the memory nodes of the nearest
exclusive ancestor of both current and the candidate task, it is better to
simply test for intersection between the task's mems_allowed in their task
descriptors.  This does not require taking callback_mutex since it is only
used as a hint in the badness scoring.

Tasks that do not have an intersection in their mems_allowed with the current
task are not explicitly restricted from being OOM killed because it is quite
possible that the candidate task has allocated memory there before and has
since changed its mems_allowed.

Cc: Andrea Arcangeli <andrea@suse.de>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7213f506
@@ -45,7 +45,8 @@ static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
 		__cpuset_zone_allowed_hardwall(z, gfp_mask);
 }
-extern int cpuset_excl_nodes_overlap(const struct task_struct *p);
+extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
+					  const struct task_struct *tsk2);
 #define cpuset_memory_pressure_bump()				\
 	do {							\
@@ -113,7 +114,8 @@ static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
 	return 1;
 }
-static inline int cpuset_excl_nodes_overlap(const struct task_struct *p)
+static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
+						 const struct task_struct *tsk2)
 {
 	return 1;
 }
......
@@ -2506,41 +2506,20 @@ int cpuset_mem_spread_node(void)
 EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);

 /**
- * cpuset_excl_nodes_overlap - Do we overlap @p's mem_exclusive ancestors?
- * @p: pointer to task_struct of some other task.
+ * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
+ * @tsk1: pointer to task_struct of some task.
+ * @tsk2: pointer to task_struct of some other task.
  *
- * Description: Return true if the nearest mem_exclusive ancestor
- * cpusets of tasks @p and current overlap.  Used by oom killer to
- * determine if task @p's memory usage might impact the memory
- * available to the current task.
- *
- * Call while holding callback_mutex.
+ * Description: Return true if @tsk1's mems_allowed intersects the
+ * mems_allowed of @tsk2.  Used by the OOM killer to determine if
+ * one of the task's memory usage might impact the memory available
+ * to the other.
 **/
-int cpuset_excl_nodes_overlap(const struct task_struct *p)
+int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
+				   const struct task_struct *tsk2)
 {
-	const struct cpuset *cs1, *cs2;	/* my and p's cpuset ancestors */
-	int overlap = 1;		/* do cpusets overlap? */
-
-	task_lock(current);
-	if (current->flags & PF_EXITING) {
-		task_unlock(current);
-		goto done;
-	}
-	cs1 = nearest_exclusive_ancestor(current->cpuset);
-	task_unlock(current);
-
-	task_lock((struct task_struct *)p);
-	if (p->flags & PF_EXITING) {
-		task_unlock((struct task_struct *)p);
-		goto done;
-	}
-	cs2 = nearest_exclusive_ancestor(p->cpuset);
-	task_unlock((struct task_struct *)p);
-
-	overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
-done:
-	return overlap;
+	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
 }

 /*
/* /*
......
@@ -143,7 +143,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
 	 * because p may have allocated or otherwise mapped memory on
 	 * this node before. However it will be less likely.
 	 */
-	if (!cpuset_excl_nodes_overlap(p))
+	if (!cpuset_mems_allowed_intersects(current, p))
 		points /= 8;
 	/*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment