Commit 161df60e authored by Naoya Horiguchi, committed by Andrew Morton

mm, hwpoison, hugetlb: support saving mechanism of raw error pages

When handling a memory error on a hugetlb page, the error handler tries to
dissolve it and turn it into 4kB pages.  If it's successfully dissolved,
the PageHWPoison flag is moved to the raw error page, so that's all right.
However, dissolving sometimes fails, and then the error page is left as a
hwpoisoned hugepage.  It would be useful if we could retry dissolving it
later to save the healthy pages, but that's not possible now because the
information about which subpages are the raw error pages is lost.

Use the private field of a few tail pages to keep that information.  The
code path that shrinks the hugepage pool uses this info to retry the
delayed dissolve.  In order to remember multiple errors in a hugepage, a
singly linked list rooted at the SUBPAGE_INDEX_HWPOISON-th tail page is
constructed.  Only simple operations (adding an entry or clearing all
entries) are required and the list is assumed not to grow very long, so
this simple data structure should be enough.
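
As a rough illustration, here is a minimal userspace C model of this list
(a sketch only; record_raw_hwp() and the pfn field are made-up names, and
the kernel code in this patch uses struct llist_node with the list head
stored in a tail page's ->private field instead of a global):

/* Userspace model of the per-hugepage raw error page list. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct raw_hwp_page {
        struct raw_hwp_page *next;
        unsigned long pfn;              /* stands in for struct page * */
};

/* In the kernel, the head lives in the SUBPAGE_INDEX_HWPOISON-th
 * tail page's ->private field, not in a global variable. */
static struct raw_hwp_page *raw_hwp_list;

/* Record one raw error page; false means the allocation failed and
 * the caller must fall back to "unreliable" mode (see below). */
static bool record_raw_hwp(unsigned long pfn)
{
        struct raw_hwp_page *p;

        for (p = raw_hwp_list; p; p = p->next)
                if (p->pfn == pfn)
                        return true;    /* duplicate event, already saved */
        p = malloc(sizeof(*p));
        if (!p)
                return false;
        p->pfn = pfn;
        p->next = raw_hwp_list;         /* push front, like llist_add() */
        raw_hwp_list = p;
        return true;
}

int main(void)
{
        struct raw_hwp_page *p;

        record_raw_hwp(0x1234);
        record_raw_hwp(0x1234);         /* ignored as a duplicate */
        record_raw_hwp(0x5678);
        for (p = raw_hwp_list; p; p = p->next)
                printf("raw error pfn: 0x%lx\n", p->pfn);
        return 0;
}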

If we fail to save the raw error info, the hwpoisoned hugepage has errors
on subpages we no longer know about, so this new saving mechanism can no
longer work; in that case, disable both saving new raw error info and
freeing the hwpoisoned hugepage.
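
The fallback policy can be sketched in the same userspace style (again
illustrative names only; in the patch this is the HPageRawHwpUnreliable
page flag, checked by __update_and_free_page()):

#include <stdbool.h>
#include <stdio.h>

/* Models the HPageRawHwpUnreliable hugetlb page flag. */
static bool raw_hwp_unreliable;

/* Called when saving raw error info fails (a kmalloc() failure in the
 * patch): the raw error list is incomplete from now on, so stop
 * recording new entries and never free this hugepage. */
static void mark_raw_hwp_unreliable(void)
{
        raw_hwp_unreliable = true;
}

/* Models the check added to __update_and_free_page(): a hugepage with
 * unknown hwpoisoned subpages is intentionally leaked rather than
 * dissolved into reusable 4kB pages. */
static bool may_free_hugepage(void)
{
        return !raw_hwp_unreliable;
}

int main(void)
{
        mark_raw_hwp_unreliable();
        printf("may free: %d\n", may_free_hugepage());  /* prints 0 */
        return 0;
}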

Link: https://lkml.kernel.org/r/20220714042420.1847125-4-naoya.horiguchi@linux.dev
Signed-off-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Reported-by: kernel test robot <lkp@intel.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Liu Shixin <liushixin2@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 3a194f3f
@@ -42,6 +42,9 @@ enum {
 	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
 	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
 	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
+#endif
+#ifdef CONFIG_MEMORY_FAILURE
+	SUBPAGE_INDEX_HWPOISON,
 #endif
 	__NR_USED_SUBPAGE,
 };
@@ -551,7 +554,7 @@ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  *	Synchronization: Initially set after new page allocation with no
  *	locking.  When examined and modified during migration processing
  *	(isolate, migrate, putback) the hugetlb_lock is held.
- * HPG_temporary - - Set on a page that is temporarily allocated from the buddy
+ * HPG_temporary - Set on a page that is temporarily allocated from the buddy
  *	allocator.  Typically used for migration target pages when no pages
  *	are available in the pool.  The hugetlb free page path will
  *	immediately free pages with this flag set to the buddy allocator.
@@ -561,6 +564,8 @@ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  * HPG_freed - Set when page is on the free lists.
  *	Synchronization: hugetlb_lock held for examination and modification.
  * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
+ * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
+ *	that is not tracked by raw_hwp_page list.
  */
 enum hugetlb_page_flags {
 	HPG_restore_reserve = 0,
@@ -568,6 +573,7 @@ enum hugetlb_page_flags {
 	HPG_temporary,
 	HPG_freed,
 	HPG_vmemmap_optimized,
+	HPG_raw_hwp_unreliable,
 	__NR_HPAGEFLAGS,
 };
@@ -614,6 +620,7 @@ HPAGEFLAG(Migratable, migratable)
 HPAGEFLAG(Temporary, temporary)
 HPAGEFLAG(Freed, freed)
 HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
+HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -796,6 +803,14 @@ extern int dissolve_free_huge_page(struct page *page);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
 				    unsigned long end_pfn);
 
+#ifdef CONFIG_MEMORY_FAILURE
+extern void hugetlb_clear_page_hwpoison(struct page *hpage);
+#else
+static inline void hugetlb_clear_page_hwpoison(struct page *hpage)
+{
+}
+#endif
+
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 #ifndef arch_hugetlb_migration_supported
 static inline bool arch_hugetlb_migration_supported(struct hstate *h)
......
@@ -1535,6 +1535,13 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 		return;
 
+	/*
+	 * If we don't know which subpages are hwpoisoned, we can't free
+	 * the hugepage, so it's leaked intentionally.
+	 */
+	if (HPageRawHwpUnreliable(page))
+		return;
+
 	if (hugetlb_vmemmap_restore(h, page)) {
 		spin_lock_irq(&hugetlb_lock);
 		/*
@@ -1547,6 +1554,13 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 		return;
 	}
 
+	/*
+	 * Move PageHWPoison flag from head page to the raw error pages,
+	 * which makes any healthy subpages reusable.
+	 */
+	if (unlikely(PageHWPoison(page)))
+		hugetlb_clear_page_hwpoison(page);
+
 	for (i = 0; i < pages_per_huge_page(h);
 	     i++, subpage = mem_map_next(subpage, page, i)) {
 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
@@ -2109,15 +2123,6 @@ int dissolve_free_huge_page(struct page *page)
 		 */
 		rc = hugetlb_vmemmap_restore(h, head);
 		if (!rc) {
-			/*
-			 * Move PageHWPoison flag from head page to the raw
-			 * error page, which makes any subpages rather than
-			 * the error page reusable.
-			 */
-			if (PageHWPoison(head) && page != head) {
-				SetPageHWPoison(page);
-				ClearPageHWPoison(head);
-			}
 			update_and_free_page(h, head, false);
 		} else {
 			spin_lock_irq(&hugetlb_lock);
......
@@ -1662,6 +1662,90 @@ int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
 EXPORT_SYMBOL_GPL(mf_dax_kill_procs);
 #endif /* CONFIG_FS_DAX */
 
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * Struct raw_hwp_page represents information about "raw error page",
+ * constructing singly linked list originated from ->private field of
+ * SUBPAGE_INDEX_HWPOISON-th tail page.
+ */
+struct raw_hwp_page {
+	struct llist_node node;
+	struct page *page;
+};
+
+static inline struct llist_head *raw_hwp_list_head(struct page *hpage)
+{
+	return (struct llist_head *)&page_private(hpage + SUBPAGE_INDEX_HWPOISON);
+}
+
+static void __free_raw_hwp_pages(struct page *hpage)
+{
+	struct llist_head *head;
+	struct llist_node *t, *tnode;
+
+	head = raw_hwp_list_head(hpage);
+	llist_for_each_safe(tnode, t, head->first) {
+		struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node);
+
+		SetPageHWPoison(p->page);
+		kfree(p);
+	}
+	llist_del_all(head);
+}
+
+static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page)
+{
+	struct llist_head *head;
+	struct raw_hwp_page *raw_hwp;
+	struct llist_node *t, *tnode;
+	int ret = TestSetPageHWPoison(hpage) ? -EHWPOISON : 0;
+
+	/*
+	 * Once the hwpoison hugepage has lost reliable raw error info,
+	 * there is little meaning to keep additional error info precisely,
+	 * so skip to add additional raw error info.
+	 */
+	if (HPageRawHwpUnreliable(hpage))
+		return -EHWPOISON;
+	head = raw_hwp_list_head(hpage);
+	llist_for_each_safe(tnode, t, head->first) {
+		struct raw_hwp_page *p = container_of(tnode, struct raw_hwp_page, node);
+
+		if (p->page == page)
+			return -EHWPOISON;
+	}
+
+	raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC);
+	if (raw_hwp) {
+		raw_hwp->page = page;
+		llist_add(&raw_hwp->node, head);
+		/* the first error event will be counted in action_result(). */
+		if (ret)
+			num_poisoned_pages_inc();
+	} else {
+		/*
+		 * Failed to save raw error info.  We no longer trace all
+		 * hwpoisoned subpages, and we need refuse to free/dissolve
+		 * this hwpoisoned hugepage.
+		 */
+		SetHPageRawHwpUnreliable(hpage);
+		/*
+		 * Once HPageRawHwpUnreliable is set, raw_hwp_page is not
+		 * used any more, so free it.
+		 */
+		__free_raw_hwp_pages(hpage);
+	}
+	return ret;
+}
+
+void hugetlb_clear_page_hwpoison(struct page *hpage)
+{
+	if (HPageRawHwpUnreliable(hpage))
+		return;
+	ClearPageHWPoison(hpage);
+	__free_raw_hwp_pages(hpage);
+}
+
 /*
  * Called from hugetlb code with hugetlb_lock held.
  *
@@ -1696,7 +1780,7 @@ int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
 		goto out;
 	}
 
-	if (TestSetPageHWPoison(head)) {
+	if (hugetlb_set_page_hwpoison(head, page)) {
 		ret = -EHWPOISON;
 		goto out;
 	}
@@ -1708,7 +1792,6 @@ int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
 	return ret;
 }
 
-#ifdef CONFIG_HUGETLB_PAGE
 /*
  * Taking refcount of hugetlb pages needs extra care about race conditions
  * with basic operations like hugepage allocation/free/demotion.
@@ -1749,7 +1832,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
 	lock_page(head);
 
 	if (hwpoison_filter(p)) {
-		ClearPageHWPoison(head);
+		hugetlb_clear_page_hwpoison(head);
 		res = -EOPNOTSUPP;
 		goto out;
 	}
......