Commit 685f3457 authored by Lee Schermerhorn, committed by Linus Torvalds

hugetlb: use free_pool_huge_page() to return unused surplus pages

Use the [modified] free_pool_huge_page() function to return unused
surplus pages.  This will help keep huge pages balanced across nodes
between freeing of unused surplus pages and freeing of persistent huge
pages [from set_max_huge_pages] by using the same node id "cursor". It
also eliminates some code duplication.
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Andy Whitcroft <apw@canonical.com>
Cc: Eric Whitney <eric.whitney@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e8c5c824
...@@ -687,7 +687,7 @@ static int hstate_next_node_to_free(struct hstate *h) ...@@ -687,7 +687,7 @@ static int hstate_next_node_to_free(struct hstate *h)
* balanced over allowed nodes. * balanced over allowed nodes.
* Called with hugetlb_lock locked. * Called with hugetlb_lock locked.
*/ */
static int free_pool_huge_page(struct hstate *h) static int free_pool_huge_page(struct hstate *h, bool acct_surplus)
{ {
int start_nid; int start_nid;
int next_nid; int next_nid;
...@@ -697,13 +697,22 @@ static int free_pool_huge_page(struct hstate *h) ...@@ -697,13 +697,22 @@ static int free_pool_huge_page(struct hstate *h)
next_nid = start_nid; next_nid = start_nid;
do { do {
if (!list_empty(&h->hugepage_freelists[next_nid])) { /*
* If we're returning unused surplus pages, only examine
* nodes with surplus pages.
*/
if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
!list_empty(&h->hugepage_freelists[next_nid])) {
struct page *page = struct page *page =
list_entry(h->hugepage_freelists[next_nid].next, list_entry(h->hugepage_freelists[next_nid].next,
struct page, lru); struct page, lru);
list_del(&page->lru); list_del(&page->lru);
h->free_huge_pages--; h->free_huge_pages--;
h->free_huge_pages_node[next_nid]--; h->free_huge_pages_node[next_nid]--;
if (acct_surplus) {
h->surplus_huge_pages--;
h->surplus_huge_pages_node[next_nid]--;
}
update_and_free_page(h, page); update_and_free_page(h, page);
ret = 1; ret = 1;
} }
...@@ -884,22 +893,13 @@ static int gather_surplus_pages(struct hstate *h, int delta) ...@@ -884,22 +893,13 @@ static int gather_surplus_pages(struct hstate *h, int delta)
* When releasing a hugetlb pool reservation, any surplus pages that were * When releasing a hugetlb pool reservation, any surplus pages that were
* allocated to satisfy the reservation must be explicitly freed if they were * allocated to satisfy the reservation must be explicitly freed if they were
* never used. * never used.
* Called with hugetlb_lock held.
*/ */
static void return_unused_surplus_pages(struct hstate *h, static void return_unused_surplus_pages(struct hstate *h,
unsigned long unused_resv_pages) unsigned long unused_resv_pages)
{ {
static int nid = -1;
struct page *page;
unsigned long nr_pages; unsigned long nr_pages;
/*
* We want to release as many surplus pages as possible, spread
* evenly across all nodes. Iterate across all nodes until we
* can no longer free unreserved surplus pages. This occurs when
* the nodes with surplus pages have no free pages.
*/
unsigned long remaining_iterations = nr_online_nodes;
/* Uncommit the reservation */ /* Uncommit the reservation */
h->resv_huge_pages -= unused_resv_pages; h->resv_huge_pages -= unused_resv_pages;
...@@ -909,26 +909,17 @@ static void return_unused_surplus_pages(struct hstate *h, ...@@ -909,26 +909,17 @@ static void return_unused_surplus_pages(struct hstate *h,
nr_pages = min(unused_resv_pages, h->surplus_huge_pages); nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
while (remaining_iterations-- && nr_pages) { /*
nid = next_node(nid, node_online_map); * We want to release as many surplus pages as possible, spread
if (nid == MAX_NUMNODES) * evenly across all nodes. Iterate across all nodes until we
nid = first_node(node_online_map); * can no longer free unreserved surplus pages. This occurs when
* the nodes with surplus pages have no free pages.
if (!h->surplus_huge_pages_node[nid]) * free_pool_huge_page() will balance the frees across the
continue; * on-line nodes for us and will handle the hstate accounting.
*/
if (!list_empty(&h->hugepage_freelists[nid])) { while (nr_pages--) {
page = list_entry(h->hugepage_freelists[nid].next, if (!free_pool_huge_page(h, 1))
struct page, lru); break;
list_del(&page->lru);
update_and_free_page(h, page);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
h->surplus_huge_pages--;
h->surplus_huge_pages_node[nid]--;
nr_pages--;
remaining_iterations = nr_online_nodes;
}
} }
} }
...@@ -1268,7 +1259,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count) ...@@ -1268,7 +1259,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
min_count = max(count, min_count); min_count = max(count, min_count);
try_to_free_low(h, min_count); try_to_free_low(h, min_count);
while (min_count < persistent_huge_pages(h)) { while (min_count < persistent_huge_pages(h)) {
if (!free_pool_huge_page(h)) if (!free_pool_huge_page(h, 0))
break; break;
} }
while (count < persistent_huge_pages(h)) { while (count < persistent_huge_pages(h)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment