Commit cfb8c750 authored by Mike Kravetz, committed by Andrew Morton

hugetlb: perform vmemmap restoration on a list of pages

The routine update_and_free_pages_bulk already performs vmemmap
restoration on the list of hugetlb pages in a separate step.  In
preparation for more functionality to be added in this step, create a new
routine hugetlb_vmemmap_restore_folios() that will restore vmemmap for a
list of folios.

This new routine must provide sufficient feedback about errors and actual
restoration performed so that update_and_free_pages_bulk can perform
optimally.

Special care must be taken when encountering an error from
hugetlb_vmemmap_restore_folios.  We want to continue making as much
forward progress as possible.  A new routine bulk_vmemmap_restore_error
handles this specific situation.
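
Concretely, the feedback contract used below is: hugetlb_vmemmap_restore_folios() returns the number of folios whose vmemmap was restored, or a negative errno on the first failure, and it moves every folio that now has a complete vmemmap onto a caller-supplied output list. A condensed sketch of how update_and_free_pages_bulk() consumes that contract (the full version is in the mm/hugetlb.c hunk below):

	LIST_HEAD(non_hvo_folios);
	long ret;

retry:
	/* >= 0: number of folios restored; < 0: first error encountered */
	ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
	if (ret < 0) {
		/* Free what we can to release memory, then retry the bulk restore. */
		bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
		goto retry;
	}
	/* Folios with intact vmemmap are now on non_hvo_folios and can be freed. */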

Link: https://lkml.kernel.org/r/20231019023113.345257-5-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Joao Martins <joao.m.martins@oracle.com>
Cc: Konrad Dybcio <konradybcio@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Usama Arif <usama.arif@bytedance.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 79359d6d
mm/hugetlb.c
@@ -1859,50 +1859,93 @@ static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
 		schedule_work(&free_hpage_work);
 }
 
-static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
+static void bulk_vmemmap_restore_error(struct hstate *h,
+					struct list_head *folio_list,
+					struct list_head *non_hvo_folios)
 {
 	struct folio *folio, *t_folio;
-	bool clear_dtor = false;
 
-	/*
-	 * First allocate required vmemmmap (if necessary) for all folios on
-	 * list.  If vmemmap can not be allocated, we can not free folio to
-	 * lower level allocator, so add back as hugetlb surplus page.
-	 * add_hugetlb_folio() removes the page from THIS list.
-	 * Use clear_dtor to note if vmemmap was successfully allocated for
-	 * ANY page on the list.
-	 */
-	list_for_each_entry_safe(folio, t_folio, list, lru) {
-		if (folio_test_hugetlb_vmemmap_optimized(folio)) {
+	if (!list_empty(non_hvo_folios)) {
+		/*
+		 * Free any restored hugetlb pages so that restore of the
+		 * entire list can be retried.
+		 * The idea is that in the common case of ENOMEM errors freeing
+		 * hugetlb pages with vmemmap we will free up memory so that we
+		 * can allocate vmemmap for more hugetlb pages.
+		 */
+		list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
+			list_del(&folio->lru);
+			spin_lock_irq(&hugetlb_lock);
+			__clear_hugetlb_destructor(h, folio);
+			spin_unlock_irq(&hugetlb_lock);
+			update_and_free_hugetlb_folio(h, folio, false);
+			cond_resched();
+		}
+	} else {
+		/*
+		 * In the case where there are no folios which can be
+		 * immediately freed, we loop through the list trying to restore
+		 * vmemmap individually in the hope that someone elsewhere may
+		 * have done something to cause success (such as freeing some
+		 * memory).  If unable to restore a hugetlb page, the hugetlb
+		 * page is made a surplus page and removed from the list.
+		 * If are able to restore vmemmap and free one hugetlb page, we
+		 * quit processing the list to retry the bulk operation.
+		 */
+		list_for_each_entry_safe(folio, t_folio, folio_list, lru)
 			if (hugetlb_vmemmap_restore(h, &folio->page)) {
+				list_del(&folio->lru);
 				spin_lock_irq(&hugetlb_lock);
 				add_hugetlb_folio(h, folio, true);
 				spin_unlock_irq(&hugetlb_lock);
-			} else
-				clear_dtor = true;
-		}
-	}
+			} else {
+				list_del(&folio->lru);
+				spin_lock_irq(&hugetlb_lock);
+				__clear_hugetlb_destructor(h, folio);
+				spin_unlock_irq(&hugetlb_lock);
+				update_and_free_hugetlb_folio(h, folio, false);
+				cond_resched();
+				break;
+			}
+	}
+}
 
+static void update_and_free_pages_bulk(struct hstate *h,
+						struct list_head *folio_list)
+{
+	long ret;
+	struct folio *folio, *t_folio;
+	LIST_HEAD(non_hvo_folios);
+
 	/*
-	 * If vmemmmap allocation was performed on any folio above, take lock
-	 * to clear destructor of all folios on list.  This avoids the need to
-	 * lock/unlock for each individual folio.
-	 * The assumption is vmemmap allocation was performed on all or none
-	 * of the folios on the list.  This is true expect in VERY rare cases.
+	 * First allocate required vmemmmap (if necessary) for all folios.
+	 * Carefully handle errors and free up any available hugetlb pages
+	 * in an effort to make forward progress.
 	 */
-	if (clear_dtor) {
+retry:
+	ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
+	if (ret < 0) {
+		bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
+		goto retry;
+	}
+
+	/*
+	 * At this point, list should be empty, ret should be >= 0 and there
+	 * should only be pages on the non_hvo_folios list.
+	 * Do note that the non_hvo_folios list could be empty.
+	 * Without HVO enabled, ret will be 0 and there is no need to call
+	 * __clear_hugetlb_destructor as this was done previously.
+	 */
+	VM_WARN_ON(!list_empty(folio_list));
+	VM_WARN_ON(ret < 0);
+	if (!list_empty(&non_hvo_folios) && ret) {
 		spin_lock_irq(&hugetlb_lock);
-		list_for_each_entry(folio, list, lru)
+		list_for_each_entry(folio, &non_hvo_folios, lru)
 			__clear_hugetlb_destructor(h, folio);
 		spin_unlock_irq(&hugetlb_lock);
 	}
 
-	/*
-	 * Free folios back to low level allocators.  vmemmap and destructors
-	 * were taken care of above, so update_and_free_hugetlb_folio will
-	 * not need to take hugetlb lock.
-	 */
-	list_for_each_entry_safe(folio, t_folio, list, lru) {
+	list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
 		update_and_free_hugetlb_folio(h, folio, false);
 		cond_resched();
 	}
mm/hugetlb_vmemmap.c
@@ -480,6 +480,44 @@ int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
 	return ret;
 }
 
+/**
+ * hugetlb_vmemmap_restore_folios - restore vmemmap for every folio on the list.
+ * @h:			hstate.
+ * @folio_list:		list of folios.
+ * @non_hvo_folios:	Output list of folios for which vmemmap exists.
+ *
+ * Return: number of folios for which vmemmap was restored, or an error code
+ *		if an error was encountered restoring vmemmap for a folio.
+ *		Folios that have vmemmap are moved to the non_hvo_folios
+ *		list.  Processing of entries stops when the first error is
+ *		encountered.  The folio that experienced the error and all
+ *		non-processed folios will remain on folio_list.
+ */
+long hugetlb_vmemmap_restore_folios(const struct hstate *h,
+					struct list_head *folio_list,
+					struct list_head *non_hvo_folios)
+{
+	struct folio *folio, *t_folio;
+	long restored = 0;
+	long ret = 0;
+
+	list_for_each_entry_safe(folio, t_folio, folio_list, lru) {
+		if (folio_test_hugetlb_vmemmap_optimized(folio)) {
+			ret = hugetlb_vmemmap_restore(h, &folio->page);
+			if (ret)
+				break;
+			restored++;
+		}
+
+		/* Add non-optimized folios to output list */
+		list_move(&folio->lru, non_hvo_folios);
+	}
+
+	if (!ret)
+		ret = restored;
+	return ret;
+}
+
 /* Return true iff a HugeTLB whose vmemmap should and can be optimized. */
 static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
 {
mm/hugetlb_vmemmap.h
@@ -19,6 +19,9 @@
 #ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
 int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
+long hugetlb_vmemmap_restore_folios(const struct hstate *h,
+					struct list_head *folio_list,
+					struct list_head *non_hvo_folios);
 void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
 void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);

@@ -45,6 +48,14 @@ static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct page *h
 	return 0;
 }
 
+static long hugetlb_vmemmap_restore_folios(const struct hstate *h,
+					struct list_head *folio_list,
+					struct list_head *non_hvo_folios)
+{
+	list_splice_init(folio_list, non_hvo_folios);
+	return 0;
+}
+
 static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
 {
 }