Commit 2150328b authored by Andi Kleen, committed by Linus Torvalds

[PATCH] NUMA API updates

This patch fixes three issues in the NUMA API:

- When 1 was passed to set_mempolicy or mbind as the maxnode argument,
  get_nodes could corrupt the stack and cause a crash.  Fix that.  (A
  sketch of a failing call follows this list.)

- Remove the restriction to do interleaving only for order 0.  Together
  with the patch that went in previously to use the interleave policy at
  boot time, this should give back the original behaviour of distributing
  the big hash tables across nodes.  (A usage sketch follows the diff
  below.)

- Fix some bad white space in comments
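
As a concrete illustration of the first item, here is a minimal userspace
sketch of a call that hit the bug.  It is illustrative only and not part of
this patch; SYS_set_mempolicy is assumed to be exposed by the libc headers
(otherwise use __NR_set_mempolicy or the libnuma wrapper directly):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define MPOL_DEFAULT 0	/* value from <linux/mempolicy.h> */

int main(void)
{
	/*
	 * maxnode == 1: get_nodes() does --maxnode, so nlongs becomes
	 * BITS_TO_LONGS(0) == 0 and the old code went on to execute
	 * nodes[nlongs-1] &= endmask, i.e. nodes[-1], a write one word
	 * below the on-stack bitmap.  The patch instead returns early
	 * when maxnode is 0 after the decrement, or when nmask is NULL.
	 */
	if (syscall(SYS_set_mempolicy, MPOL_DEFAULT, NULL, 1UL) < 0)
		perror("set_mempolicy");
	return 0;
}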
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 0d8e8a6c
@@ -135,6 +135,10 @@ static int get_nodes(unsigned long *nodes, unsigned long __user *nmask,
 	unsigned long endmask;
 
 	--maxnode;
+	bitmap_zero(nodes, MAX_NUMNODES);
+	if (maxnode == 0 || !nmask)
+		return 0;
+
 	nlongs = BITS_TO_LONGS(maxnode);
 	if ((maxnode % BITS_PER_LONG) == 0)
 		endmask = ~0UL;
@@ -143,7 +147,7 @@ static int get_nodes(unsigned long *nodes, unsigned long __user *nmask,
 
 	/* When the user specified more nodes than supported just check
 	   if the non supported part is all zero. */
-	if (nmask && nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
+	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
 			unsigned long t;
 			if (get_user(t, nmask + k))
@@ -158,8 +162,7 @@ static int get_nodes(unsigned long *nodes, unsigned long __user *nmask,
 			endmask = ~0UL;
 	}
 
-	bitmap_zero(nodes, MAX_NUMNODES);
-	if (nmask && copy_from_user(nodes, nmask, nlongs*sizeof(unsigned long)))
+	if (copy_from_user(nodes, nmask, nlongs*sizeof(unsigned long)))
 		return -EFAULT;
 	nodes[nlongs-1] &= endmask;
 	return mpol_check_policy(mode, nodes);
@@ -622,14 +625,14 @@ static unsigned offset_il_node(struct mempolicy *pol,
 
 /* Allocate a page in interleaved policy.
    Own path because it needs to do special accounting. */
-static struct page *alloc_page_interleave(unsigned gfp, unsigned nid)
+static struct page *alloc_page_interleave(unsigned gfp, unsigned order, unsigned nid)
 {
 	struct zonelist *zl;
 	struct page *page;
 
 	BUG_ON(!test_bit(nid, node_online_map));
 	zl = NODE_DATA(nid)->node_zonelists + (gfp & GFP_ZONEMASK);
-	page = __alloc_pages(gfp, 0, zl);
+	page = __alloc_pages(gfp, order, zl);
 	if (page && page_zone(page) == zl->zones[0]) {
 		zl->zones[0]->pageset[get_cpu()].interleave_hit++;
 		put_cpu();
@@ -677,7 +680,7 @@ alloc_page_vma(unsigned gfp, struct vm_area_struct *vma, unsigned long addr)
 			/* fall back to process interleaving */
 			nid = interleave_nodes(pol);
 		}
-		return alloc_page_interleave(gfp, nid);
+		return alloc_page_interleave(gfp, 0, nid);
 	}
 	return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
 }
@@ -703,8 +706,8 @@ struct page *alloc_pages_current(unsigned gfp, unsigned order)
 
 	if (!pol || in_interrupt())
 		pol = &default_policy;
-	if (pol->policy == MPOL_INTERLEAVE && order == 0)
-		return alloc_page_interleave(gfp, interleave_nodes(pol));
+	if (pol->policy == MPOL_INTERLEAVE)
+		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
 	return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
 }
 EXPORT_SYMBOL(alloc_pages_current);
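
To illustrate the interleaving change above, here is a hypothetical
kernel-style caller in the spirit of the boot-time hash-table allocations
the commit message mentions; alloc_big_table() is not a real kernel
function, just a sketch:

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Under a task-wide (or boot-time default) MPOL_INTERLEAVE policy,
 * alloc_pages() resolves to alloc_pages_current().  With this patch the
 * interleave path is taken for any order, so successive high-order
 * chunks round-robin across nodes; the old "order == 0" test made such
 * allocations fall through to zonelist_policy() and land on one node.
 */
static void *alloc_big_table(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL, order);

	return page ? page_address(page) : NULL;
}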