Commit 18a2f371 authored by Mel Gorman, committed by Linus Torvalds

tmpfs: fix shared mempolicy leak

This fixes a regression in 3.7-rc, which has since gone into stable.

Commit 00442ad0 ("mempolicy: fix a memory corruption by refcount
imbalance in alloc_pages_vma()") changed get_vma_policy() to raise the
refcount on a shmem shared mempolicy; whereas shmem_alloc_page() went
on expecting alloc_page_vma() to drop the refcount it had acquired.
This deserves a rework: but for now fix the leak in shmem_alloc_page().

Hugh: shmem_swapin() did not need a fix, but surely it's clearer to use
the same refcounting there as in shmem_alloc_page(), delete its onstack
mempolicy, and the strange mpol_cond_copy() and __mpol_cond_copy() -
those were invented to let swapin_readahead() make an unknown number of
calls to alloc_pages_vma() with one mempolicy; but since 00442ad0,
alloc_pages_vma() has kept refcount in balance, so now no problem.
Reported-and-tested-by: Tommi Rantala <tt.rantala@gmail.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c702418f
...@@ -82,16 +82,6 @@ static inline void mpol_cond_put(struct mempolicy *pol) ...@@ -82,16 +82,6 @@ static inline void mpol_cond_put(struct mempolicy *pol)
__mpol_put(pol); __mpol_put(pol);
} }
extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
struct mempolicy *frompol);
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
struct mempolicy *frompol)
{
if (!frompol)
return frompol;
return __mpol_cond_copy(tompol, frompol);
}
extern struct mempolicy *__mpol_dup(struct mempolicy *pol); extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol) static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{ {
...@@ -215,12 +205,6 @@ static inline void mpol_cond_put(struct mempolicy *pol) ...@@ -215,12 +205,6 @@ static inline void mpol_cond_put(struct mempolicy *pol)
{ {
} }
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
struct mempolicy *from)
{
return from;
}
static inline void mpol_get(struct mempolicy *pol) static inline void mpol_get(struct mempolicy *pol)
{ {
} }
......
...@@ -2037,28 +2037,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old) ...@@ -2037,28 +2037,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
return new; return new;
} }
/*
* If *frompol needs [has] an extra ref, copy *frompol to *tompol ,
* eliminate the * MPOL_F_* flags that require conditional ref and
* [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly
* after return. Use the returned value.
*
* Allows use of a mempolicy for, e.g., multiple allocations with a single
* policy lookup, even if the policy needs/has extra ref on lookup.
* shmem_readahead needs this.
*/
struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
struct mempolicy *frompol)
{
if (!mpol_needs_cond_ref(frompol))
return frompol;
*tompol = *frompol;
tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */
__mpol_put(frompol);
return tompol;
}
/* Slow path of a mempolicy comparison */ /* Slow path of a mempolicy comparison */
bool __mpol_equal(struct mempolicy *a, struct mempolicy *b) bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{ {
......
...@@ -910,25 +910,29 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) ...@@ -910,25 +910,29 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index) struct shmem_inode_info *info, pgoff_t index)
{ {
struct mempolicy mpol, *spol;
struct vm_area_struct pvma; struct vm_area_struct pvma;
struct page *page;
spol = mpol_cond_copy(&mpol,
mpol_shared_policy_lookup(&info->policy, index));
/* Create a pseudo vma that just contains the policy */ /* Create a pseudo vma that just contains the policy */
pvma.vm_start = 0; pvma.vm_start = 0;
/* Bias interleave by inode number to distribute better across nodes */ /* Bias interleave by inode number to distribute better across nodes */
pvma.vm_pgoff = index + info->vfs_inode.i_ino; pvma.vm_pgoff = index + info->vfs_inode.i_ino;
pvma.vm_ops = NULL; pvma.vm_ops = NULL;
pvma.vm_policy = spol; pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
return swapin_readahead(swap, gfp, &pvma, 0);
page = swapin_readahead(swap, gfp, &pvma, 0);
/* Drop reference taken by mpol_shared_policy_lookup() */
mpol_cond_put(pvma.vm_policy);
return page;
} }
static struct page *shmem_alloc_page(gfp_t gfp, static struct page *shmem_alloc_page(gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index) struct shmem_inode_info *info, pgoff_t index)
{ {
struct vm_area_struct pvma; struct vm_area_struct pvma;
struct page *page;
/* Create a pseudo vma that just contains the policy */ /* Create a pseudo vma that just contains the policy */
pvma.vm_start = 0; pvma.vm_start = 0;
...@@ -937,10 +941,12 @@ static struct page *shmem_alloc_page(gfp_t gfp, ...@@ -937,10 +941,12 @@ static struct page *shmem_alloc_page(gfp_t gfp,
pvma.vm_ops = NULL; pvma.vm_ops = NULL;
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index); pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
/* page = alloc_page_vma(gfp, &pvma, 0);
* alloc_page_vma() will drop the shared policy reference
*/ /* Drop reference taken by mpol_shared_policy_lookup() */
return alloc_page_vma(gfp, &pvma, 0); mpol_cond_put(pvma.vm_policy);
return page;
} }
#else /* !CONFIG_NUMA */ #else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS #ifdef CONFIG_TMPFS
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment