Commit c3114a84 authored by Anshuman Khandual, committed by Linus Torvalds

mm: hugetlb: soft-offline: dissolve source hugepage after successful migration

Currently a hugepage migrated by soft-offline (i.e. due to correctable
memory errors) is kept as a hugepage, which means the many non-error
subpages in it remain unusable, i.e. wasted.

This patch solves this issue by dissolving source hugepages into buddy.
As done in the previous patch, PageHWPoison is set only on the head page of
the error hugepage.  Then, when dissolving, we move the PageHWPoison flag
to the raw error page so that all healthy subpages return to buddy.
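
As a rough userspace sketch of that idea (the toy_page struct,
SUBPAGES_PER_HUGEPAGE and toy_dissolve() below are made-up illustrations,
not kernel interfaces): a hugepage is modelled as an array of subpages,
only the head carries the poison flag at first, and dissolving moves the
flag to the raw error page and releases every healthy subpage.

#include <stdbool.h>
#include <stdio.h>

#define SUBPAGES_PER_HUGEPAGE 8	/* illustrative; a 2MB hugepage has 512 */

struct toy_page {
	bool hwpoison;	/* stands in for PageHWPoison */
	bool in_buddy;	/* "freed back to the buddy allocator" */
};

/*
 * Toy dissolve: move the poison flag from the head page to the raw
 * error page, then release every healthy subpage so only the one bad
 * base page stays unusable.
 */
static void toy_dissolve(struct toy_page hp[], int error_idx)
{
	struct toy_page *head = &hp[0];
	struct toy_page *err = &hp[error_idx];

	if (head->hwpoison && err != head) {
		err->hwpoison = true;
		head->hwpoison = false;
	}

	for (int i = 0; i < SUBPAGES_PER_HUGEPAGE; i++)
		if (!hp[i].hwpoison)
			hp[i].in_buddy = true;
}

int main(void)
{
	struct toy_page hp[SUBPAGES_PER_HUGEPAGE] = { 0 };

	hp[0].hwpoison = true;	/* soft-offline marked only the head page */
	toy_dissolve(hp, 3);	/* the raw error sits in subpage 3 */

	for (int i = 0; i < SUBPAGES_PER_HUGEPAGE; i++)
		printf("subpage %d: %s%s\n", i,
		       hp[i].in_buddy ? "reusable" : "unusable",
		       hp[i].hwpoison ? " (hwpoison)" : "");
	return 0;
}

Running it shows subpage 3 ending up unusable (hwpoison) while every
other subpage is reusable, which is the accounting this patch is after.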

[arnd@arndb.de: fix warnings: replace some macros with inline functions]
  Link: http://lkml.kernel.org/r/20170609102544.2947326-1-arnd@arndb.de
Link: http://lkml.kernel.org/r/1496305019-5493-5-git-send-email-n-horiguchi@ah.jp.nec.com
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b37ff71c
@@ -472,6 +472,7 @@ static inline pgoff_t basepage_index(struct page *page)
	return __basepage_index(page);
}
extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
@@ -550,15 +551,37 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0
static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}
static inline int hstate_index(struct hstate *h)
{
	return 0;
}
static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e) 0
#define hugepage_migration_supported(h) false
static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}
static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}
static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
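
The macro-to-inline conversion in this hunk is the "fix warnings" fixup
noted in the changelog. A reduced, hypothetical illustration of why a
no-op stub macro can warn while a static inline stub does not; the
*_stub/*_inline names and the caller below are invented for this example:

/* Hypothetical, self-contained illustration; not the kernel header. */
#define dissolve_free_huge_pages_stub(s, e) 0	/* old-style no-op macro */

static inline int dissolve_free_huge_pages_inline(unsigned long start_pfn,
						  unsigned long end_pfn)
{
	return 0;	/* arguments are evaluated and type-checked */
}

int caller_example(void)
{
	unsigned long start_pfn = 0x1000, end_pfn = 0x1200;

	/*
	 * The macro drops both arguments from its expansion, so with
	 * CONFIG_HUGETLB_PAGE=n the compiler can flag start_pfn and
	 * end_pfn as unused in callers like this one; switching to the
	 * inline stub (dissolve_free_huge_pages_inline) avoids that.
	 */
	return dissolve_free_huge_pages_stub(start_pfn, end_pfn);
}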
@@ -1459,7 +1459,7 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 * number of free hugepages would be reduced below the number of reserved
 * hugepages.
 */
static int dissolve_free_huge_page(struct page *page)
int dissolve_free_huge_page(struct page *page)
{
	int rc = 0;
@@ -1472,6 +1472,14 @@ static int dissolve_free_huge_page(struct page *page)
		rc = -EBUSY;
		goto out;
	}
	/*
	 * Move PageHWPoison flag from head page to the raw error page,
	 * which makes any subpages rather than the error page reusable.
	 */
	if (PageHWPoison(head) && page != head) {
		SetPageHWPoison(page);
		ClearPageHWPoison(head);
	}
	list_del(&head->lru);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
@@ -1575,11 +1575,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
		if (ret > 0)
			ret = -EIO;
	} else {
		/* overcommit hugetlb page will be freed to buddy */
		SetPageHWPoison(page);
		if (PageHuge(page))
			dequeue_hwpoisoned_huge_page(hpage);
		num_poisoned_pages_inc();
		dissolve_free_huge_page(page);
	}
	return ret;
}
@@ -1252,6 +1252,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
out:
	if (rc != -EAGAIN)
		putback_active_hugepage(hpage);
	if (reason == MR_MEMORY_FAILURE && !test_set_page_hwpoison(hpage))
		num_poisoned_pages_inc();
	/*
	 * If migration was not successful and there's a freeing callback, use