Commit 5ced66c9 authored by Andi Kleen's avatar Andi Kleen Committed by Linus Torvalds

hugetlb: abstract numa round robin selection

Need this as a separate function for a future patch.

No behaviour change.
Acked-by: Adam Litke <agl@us.ibm.com>
Acked-by: Nishanth Aravamudan <nacc@us.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a3437870
@@ -565,6 +565,27 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
	return page;
}
/*
 * Advance h->hugetlb_next_nid to the next online node, wrapping back to
 * the first online node when the end of the node mask is reached.
 *
 * The candidate nid is computed in a local variable and only then stored
 * into hugetlb_next_nid: writing the intermediate value directly would
 * open a window where a concurrent caller could read MAX_NUMNODES and
 * pass that invalid nid to alloc_pages_node.  No spin_lock is needed —
 * it is harmless if two racing callers occasionally pick the same nid.
 * The nid is advanced even after a successful hugepage allocation, so
 * that consecutive callers spread their allocations across nodes.
 */
static int hstate_next_node(struct hstate *h)
{
	int nid = next_node(h->hugetlb_next_nid, node_online_map);

	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	h->hugetlb_next_nid = nid;
	return nid;
}
static int alloc_fresh_huge_page(struct hstate *h)
{
	struct page *page;
@@ -578,21 +599,7 @@ static int alloc_fresh_huge_page(struct hstate *h)
		page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
		if (page)
			ret = 1;
-		/*
-		 * Use a helper variable to find the next node and then
-		 * copy it back to hugetlb_next_nid afterwards:
-		 * otherwise there's a window in which a racer might
-		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
-		 * But we don't need to use a spin_lock here: it really
-		 * doesn't matter if occasionally a racer chooses the
-		 * same nid as we do. Move nid forward in the mask even
-		 * if we just successfully allocated a hugepage so that
-		 * the next caller gets hugepages on the next node.
-		 */
-		next_nid = next_node(h->hugetlb_next_nid, node_online_map);
-		if (next_nid == MAX_NUMNODES)
-			next_nid = first_node(node_online_map);
-		h->hugetlb_next_nid = next_nid;
+		next_nid = hstate_next_node(h);
	} while (!page && h->hugetlb_next_nid != start_nid);
	if (ret)
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment