Commit 42288fe3 authored by Mel Gorman, committed by Linus Torvalds

mm: mempolicy: Convert shared_policy mutex to spinlock

Sasha was fuzzing with trinity and reported the following problem:

  BUG: sleeping function called from invalid context at kernel/mutex.c:269
  in_atomic(): 1, irqs_disabled(): 0, pid: 6361, name: trinity-main
  2 locks held by trinity-main/6361:
   #0:  (&mm->mmap_sem){++++++}, at: [<ffffffff810aa314>] __do_page_fault+0x1e4/0x4f0
   #1:  (&(&mm->page_table_lock)->rlock){+.+...}, at: [<ffffffff8122f017>] handle_pte_fault+0x3f7/0x6a0
  Pid: 6361, comm: trinity-main Tainted: G        W
  3.7.0-rc2-next-20121024-sasha-00001-gd95ef01-dirty #74
  Call Trace:
    __might_sleep+0x1c3/0x1e0
    mutex_lock_nested+0x29/0x50
    mpol_shared_policy_lookup+0x2e/0x90
    shmem_get_policy+0x2e/0x30
    get_vma_policy+0x5a/0xa0
    mpol_misplaced+0x41/0x1d0
    handle_pte_fault+0x465/0x6a0

This was triggered by a different version of automatic NUMA balancing,
but in theory the current version is vulnerable to the same problem.

do_numa_page
  -> numa_migrate_prep
    -> mpol_misplaced
      -> get_vma_policy
        -> shmem_get_policy

It's very unlikely this will happen as shared pages are not marked
pte_numa -- see the page_mapcount() check in change_pte_range() -- but
it is possible.
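
The guard in question looks roughly like this (a paraphrase of the
3.7-era change_pte_range() body under prot_numa, not the verbatim
source):

	page = vm_normal_page(vma, addr, oldpte);
	if (page) {
		/* Only privately-mapped pages (mapcount == 1) are
		 * marked pte_numa, so shared pages such as shmem
		 * normally never reach do_numa_page() at all. */
		if (!pte_numa(oldpte) && page_mapcount(page) == 1) {
			ptent = pte_mknuma(ptent);
			updated = true;
		}
	}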

To address this, this patch restores sp->lock as originally implemented
by KOSAKI Motohiro.  In the path where get_vma_policy() is called,
sp_alloc() should never be needed, so it is not necessary to treat the
page table lock (PTL) specially.
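
Because sp->lock is now a spinlock, shared_policy_replace() can no
longer allocate while holding it; the diff below handles this by
dropping the lock, allocating, and restarting.  In outline (a
simplified sketch of the scheme in the diff; split_needed() and
insert_split_node() are illustrative stand-ins for the real
"old policy spans the whole new range" test and node insertion, and
the real code frees mpol_new with mpol_put() after initializing its
refcount):

	struct sp_node *n_new = NULL;
	struct mempolicy *mpol_new = NULL;
	int ret = 0;

restart:
	spin_lock(&sp->lock);
	if (split_needed(sp, start, end)) {
		if (!n_new)
			goto alloc_new;
		insert_split_node(sp, n_new, mpol_new);	/* consume both */
		n_new = NULL;		/* ownership passed to the tree */
		mpol_new = NULL;
	}
	spin_unlock(&sp->lock);
	ret = 0;

err_out:
	/* Free whichever preallocations went unused. */
	if (mpol_new)
		kmem_cache_free(policy_cache, mpol_new);
	if (n_new)
		kmem_cache_free(sn_cache, n_new);
	return ret;

alloc_new:
	/* GFP_KERNEL may sleep, so allocate with the lock dropped;
	 * the tree may change meanwhile, so retry from scratch. */
	spin_unlock(&sp->lock);
	ret = -ENOMEM;
	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
	if (!n_new)
		goto err_out;
	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!mpol_new)
		goto err_out;
	goto restart;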
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Tested-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5439ca6b
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -123,7 +123,7 @@ struct sp_node {
 
 struct shared_policy {
 	struct rb_root root;
-	struct mutex mutex;
+	spinlock_t lock;
 };
 
 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2132,7 +2132,7 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
  */
 
 /* lookup first element intersecting start-end */
-/* Caller holds sp->mutex */
+/* Caller holds sp->lock */
 static struct sp_node *
 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
 {
@@ -2196,13 +2196,13 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
 
 	if (!sp->root.rb_node)
 		return NULL;
-	mutex_lock(&sp->mutex);
+	spin_lock(&sp->lock);
 	sn = sp_lookup(sp, idx, idx+1);
 	if (sn) {
 		mpol_get(sn->policy);
 		pol = sn->policy;
 	}
-	mutex_unlock(&sp->mutex);
+	spin_unlock(&sp->lock);
 	return pol;
 }
 
@@ -2328,6 +2328,14 @@ static void sp_delete(struct shared_policy *sp, struct sp_node *n)
 	sp_free(n);
 }
 
+static void sp_node_init(struct sp_node *node, unsigned long start,
+			unsigned long end, struct mempolicy *pol)
+{
+	node->start = start;
+	node->end = end;
+	node->policy = pol;
+}
+
 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
 				struct mempolicy *pol)
 {
@@ -2344,10 +2352,7 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
 		return NULL;
 	}
 	newpol->flags |= MPOL_F_SHARED;
-
-	n->start = start;
-	n->end = end;
-	n->policy = newpol;
+	sp_node_init(n, start, end, newpol);
 
 	return n;
 }
@@ -2357,9 +2362,12 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
 				 unsigned long end, struct sp_node *new)
 {
 	struct sp_node *n;
+	struct sp_node *n_new = NULL;
+	struct mempolicy *mpol_new = NULL;
 	int ret = 0;
 
-	mutex_lock(&sp->mutex);
+restart:
+	spin_lock(&sp->lock);
 	n = sp_lookup(sp, start, end);
 	/* Take care of old policies in the same range. */
 	while (n && n->start < end) {
@@ -2372,14 +2380,16 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
 		} else {
 			/* Old policy spanning whole new range. */
 			if (n->end > end) {
-				struct sp_node *new2;
-				new2 = sp_alloc(end, n->end, n->policy);
-				if (!new2) {
-					ret = -ENOMEM;
-					goto out;
-				}
+				if (!n_new)
+					goto alloc_new;
+
+				*mpol_new = *n->policy;
+				atomic_set(&mpol_new->refcnt, 1);
+				sp_node_init(n_new, n->end, end, mpol_new);
+				sp_insert(sp, n_new);
 				n->end = start;
-				sp_insert(sp, new2);
+				n_new = NULL;
+				mpol_new = NULL;
 				break;
 			} else
 				n->end = start;
@@ -2390,9 +2400,27 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
 	}
 	if (new)
 		sp_insert(sp, new);
-out:
-	mutex_unlock(&sp->mutex);
+	spin_unlock(&sp->lock);
+	ret = 0;
+
+err_out:
+	if (mpol_new)
+		mpol_put(mpol_new);
+	if (n_new)
+		kmem_cache_free(sn_cache, n_new);
+
 	return ret;
+
+alloc_new:
+	spin_unlock(&sp->lock);
+	ret = -ENOMEM;
+	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
+	if (!n_new)
+		goto err_out;
+	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
+	if (!mpol_new)
+		goto err_out;
+	goto restart;
 }
 
 /**
@@ -2410,7 +2438,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
 	int ret;
 
 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
-	mutex_init(&sp->mutex);
+	spin_lock_init(&sp->lock);
 
 	if (mpol) {
 		struct vm_area_struct pvma;
@@ -2476,14 +2504,14 @@ void mpol_free_shared_policy(struct shared_policy *p)
 
 	if (!p->root.rb_node)
 		return;
-	mutex_lock(&p->mutex);
+	spin_lock(&p->lock);
 	next = rb_first(&p->root);
 	while (next) {
 		n = rb_entry(next, struct sp_node, nd);
 		next = rb_next(&n->nd);
 		sp_delete(p, n);
 	}
-	mutex_unlock(&p->mutex);
+	spin_unlock(&p->lock);
 }
 
 #ifdef CONFIG_NUMA_BALANCING