Commit 8f858254 authored by Gerald Schaefer, committed by Greg Kroah-Hartman

mm/hugetlb: check for reserved hugepages during memory offline

commit 082d5b6b upstream.

In dissolve_free_huge_pages(), free hugepages will be dissolved without
making sure that there are enough of them left to satisfy hugepage
reservations.

Fix this by adding a return value to dissolve_free_huge_pages() and
checking h->free_huge_pages vs.  h->resv_huge_pages.  Note that this may
lead to the situation where dissolve_free_huge_page() returns an error
and all free hugepages that were dissolved before that error are lost,
while the memory block still cannot be set offline.
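
To make the check concrete before the diff, here is a minimal userspace sketch of the invariant the patch enforces, namely that dissolving must never push the number of free hugepages below the number of reserved ones. struct pool and dissolve_one() are simplified stand-ins for the kernel's hstate counters and dissolve_free_huge_page(), not the actual API:

#include <errno.h>
#include <stdio.h>

/* Toy model of the hstate free/reserved counters. */
struct pool {
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
};

static int dissolve_one(struct pool *p)
{
	/* The same check the patch adds to dissolve_free_huge_page(). */
	if (p->free_huge_pages - p->resv_huge_pages == 0)
		return -EBUSY;
	p->free_huge_pages--;
	return 0;
}

int main(void)
{
	struct pool p = { .free_huge_pages = 2, .resv_huge_pages = 1 };

	printf("first dissolve:  %d\n", dissolve_one(&p));	/* 0 */
	printf("second dissolve: %d\n", dissolve_one(&p));	/* -EBUSY (-16) */
	return 0;
}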

Fixes: c8721bbb ("mm: memory-hotplug: enable memory hotplug to handle hugepage")
Link: http://lkml.kernel.org/r/20160926172811.94033-3-gerald.schaefer@de.ibm.com
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Rui Teng <rui.teng@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent eec435b0
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -450,7 +450,7 @@ static inline pgoff_t basepage_index(struct page *page)
 	return __basepage_index(page);
 }
 
-extern void dissolve_free_huge_pages(unsigned long start_pfn,
+extern int dissolve_free_huge_pages(unsigned long start_pfn,
 				unsigned long end_pfn);
 static inline bool hugepage_migration_supported(struct hstate *h)
 {
@@ -518,7 +518,7 @@ static inline pgoff_t basepage_index(struct page *page)
 {
 	return page->index;
 }
 
-#define dissolve_free_huge_pages(s, e)	do {} while (0)
+#define dissolve_free_huge_pages(s, e)	0
 #define hugepage_migration_supported(h)	false
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
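
A note on the second hunk above: because callers now consume the return value, the old do {} while (0) stub for !CONFIG_HUGETLB_PAGE builds would no longer compile in an assignment, so the macro must expand to an int expression. A hedged illustration of why (offline_sketch() is a made-up caller, not kernel code):

/* Stub as defined when hugepage support is compiled out. */
#define dissolve_free_huge_pages(s, e)	0

/* A made-up caller in the style of the memory-offline path. */
int offline_sketch(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret = dissolve_free_huge_pages(start_pfn, end_pfn);
	/* With the stub, this expands to: int ret = 0; */
	return ret;
}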
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1437,22 +1437,32 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 
 /*
  * Dissolve a given free hugepage into free buddy pages. This function does
- * nothing for in-use (including surplus) hugepages.
+ * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
+ * number of free hugepages would be reduced below the number of reserved
+ * hugepages.
  */
-static void dissolve_free_huge_page(struct page *page)
+static int dissolve_free_huge_page(struct page *page)
 {
+	int rc = 0;
+
 	spin_lock(&hugetlb_lock);
 	if (PageHuge(page) && !page_count(page)) {
 		struct page *head = compound_head(page);
 		struct hstate *h = page_hstate(head);
 		int nid = page_to_nid(head);
+		if (h->free_huge_pages - h->resv_huge_pages == 0) {
+			rc = -EBUSY;
+			goto out;
+		}
 		list_del(&head->lru);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
 		h->max_huge_pages--;
 		update_and_free_page(h, head);
 	}
+out:
 	spin_unlock(&hugetlb_lock);
+	return rc;
 }
 
 /*
@@ -1460,16 +1470,22 @@ static void dissolve_free_huge_page(struct page *page)
  * make specified memory blocks removable from the system.
  * Note that this will dissolve a free gigantic hugepage completely, if any
  * part of it lies within the given range.
+ * Also note that if dissolve_free_huge_page() returns with an error, all
+ * free hugepages that were dissolved before that error are lost.
  */
-void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long pfn;
+	int rc = 0;
 
 	if (!hugepages_supported())
-		return;
+		return rc;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
-		dissolve_free_huge_page(pfn_to_page(pfn));
+		if (rc = dissolve_free_huge_page(pfn_to_page(pfn)))
+			break;
+
+	return rc;
 }
 
 /*
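
As the updated comment warns, the walk above stops at the first page that cannot be dissolved, but pages dissolved before the failure stay dissolved. A minimal userspace sketch of that behavior, reusing the toy pool model from the sketch above rather than the kernel API:

#include <errno.h>
#include <stdio.h>

struct pool {
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
};

static int dissolve_one(struct pool *p)
{
	if (p->free_huge_pages - p->resv_huge_pages == 0)
		return -EBUSY;
	p->free_huge_pages--;
	return 0;
}

static int dissolve_range(struct pool *p, int npages)
{
	int i, rc = 0;

	for (i = 0; i < npages; i++)
		if ((rc = dissolve_one(p)))
			break;	/* first failure ends the walk */
	return rc;
}

int main(void)
{
	struct pool p = { .free_huge_pages = 3, .resv_huge_pages = 2 };
	int rc = dissolve_range(&p, 2);	/* one page dissolves, then -EBUSY */

	/* Prints rc = -16 with free_huge_pages already reduced to 2:
	 * the dissolved page is not given back on failure. */
	printf("rc = %d, free left = %lu\n", rc, p.free_huge_pages);
	return 0;
}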
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1945,7 +1945,9 @@ static int __ref __offline_pages(unsigned long start_pfn,
 	 * dissolve free hugepages in the memory block before doing offlining
 	 * actually in order to make hugetlbfs's object counting consistent.
 	 */
-	dissolve_free_huge_pages(start_pfn, end_pfn);
+	ret = dissolve_free_huge_pages(start_pfn, end_pfn);
+	if (ret)
+		goto failed_removal;
 	/* check again */
 	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
 	if (offlined_pages < 0) {