Commit 4375a553 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: move page->deferred_list to folio->_deferred_list

Remove the entire block of definitions for the second tail page, and add
the deferred list to the struct folio.  This actually moves _deferred_list
to a different offset in struct folio because I don't see a need to
include the padding.

This lets us use list_for_each_entry_safe() in deferred_split_scan()
and avoid a number of calls to compound_head().

Link: https://lkml.kernel.org/r/20230111142915.1001531-25-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a8d55327
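
For the offset change mentioned in the commit message: the old deferred_list sat one word further into the THP's second tail page than the new folio field does, because the old block carried an extra padding word. A standalone sketch (plain C, not kernel code; the struct and field names only mirror the diff below, and the offsets assume a 64-bit build) that checks the one-word shift:

#include <assert.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* Old view of the second tail page: struct page's flags word followed
 * by the fields of the block this patch removes. */
struct old_second_tail {
        unsigned long flags;
        unsigned long _compound_pad_1;  /* overlays compound_head */
        unsigned long _compound_pad_2;
        struct list_head deferred_list;
};

/* New view: the union arm this patch adds to struct folio. */
struct new_second_tail {
        unsigned long _flags_2a;
        unsigned long _head_2a;         /* overlays compound_head */
        struct list_head _deferred_list;
};

int main(void)
{
        /* Dropping _compound_pad_2 moves the list up by one word. */
        assert(offsetof(struct old_second_tail, deferred_list) ==
               offsetof(struct new_second_tail, _deferred_list) +
               sizeof(unsigned long));
        return 0;
}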
@@ -295,11 +295,10 @@ static inline bool thp_migration_supported(void)
 
 static inline struct list_head *page_deferred_list(struct page *page)
 {
-        /*
-         * See organization of tail pages of compound page in
-         * "struct page" definition.
-         */
-        return &page[2].deferred_list;
+        struct folio *folio = (struct folio *)page;
+
+        VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
+        return &folio->_deferred_list;
 }
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -141,12 +141,6 @@ struct page {
                 struct {        /* Tail pages of compound page */
                         unsigned long compound_head;    /* Bit zero is set */
                 };
-                struct {        /* Second tail page of transparent huge page */
-                        unsigned long _compound_pad_1;  /* compound_head */
-                        unsigned long _compound_pad_2;
-                        /* For both global and memcg */
-                        struct list_head deferred_list;
-                };
                 struct {        /* Second tail page of hugetlb page */
                         unsigned long _hugetlb_pad_1;   /* compound_head */
                         void *hugetlb_subpool;
@@ -302,6 +296,7 @@ static inline struct page *encoded_page_ptr(struct encoded_page *page)
  * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
  * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
  * @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
+ * @_deferred_list: Folios to be split under memory pressure.
  *
  * A folio is a physically, virtually and logically contiguous set
  * of bytes.  It is a power-of-two in size, and it is aligned to that
@@ -366,6 +361,13 @@ struct folio {
                         void *_hugetlb_cgroup;
                         void *_hugetlb_cgroup_rsvd;
                         void *_hugetlb_hwpoison;
+        /* private: the union with struct page is transitional */
+                };
+                struct {
+                        unsigned long _flags_2a;
+                        unsigned long _head_2a;
+        /* public: */
+                        struct list_head _deferred_list;
         /* private: the union with struct page is transitional */
                 };
                 struct page __page_2;
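
The two placeholder words in the new union arm keep _deferred_list clear of the second tail page's flags and compound_head, which the neighbouring struct page __page_2 member of the same union still owns. A hypothetical build-time check in the spirit of the FOLIO_MATCH asserts already used in mm_types.h (the assert below is illustrative only, not part of this patch):

static_assert(offsetof(struct folio, _head_2a) ==
              offsetof(struct page, compound_head) + 2 * sizeof(struct page),
              "_head_2a must overlay the second tail page's compound_head");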
@@ -2756,9 +2756,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
         /* Prevent deferred_split_scan() touching ->_refcount */
         spin_lock(&ds_queue->split_queue_lock);
         if (folio_ref_freeze(folio, 1 + extra_pins)) {
-                if (!list_empty(page_deferred_list(&folio->page))) {
+                if (!list_empty(&folio->_deferred_list)) {
                         ds_queue->split_queue_len--;
-                        list_del(page_deferred_list(&folio->page));
+                        list_del(&folio->_deferred_list);
                 }
                 spin_unlock(&ds_queue->split_queue_lock);
                 if (mapping) {
@@ -2873,8 +2873,8 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
         struct pglist_data *pgdata = NODE_DATA(sc->nid);
         struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
         unsigned long flags;
-        LIST_HEAD(list), *pos, *next;
-        struct page *page;
+        LIST_HEAD(list);
+        struct folio *folio, *next;
         int split = 0;
 
 #ifdef CONFIG_MEMCG
@@ -2884,14 +2884,13 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 
         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
         /* Take pin on all head pages to avoid freeing them under us */
-        list_for_each_safe(pos, next, &ds_queue->split_queue) {
-                page = list_entry((void *)pos, struct page, deferred_list);
-                page = compound_head(page);
-                if (get_page_unless_zero(page)) {
-                        list_move(page_deferred_list(page), &list);
+        list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
+                                                        _deferred_list) {
+                if (folio_try_get(folio)) {
+                        list_move(&folio->_deferred_list, &list);
                 } else {
-                        /* We lost race with put_compound_page() */
-                        list_del_init(page_deferred_list(page));
+                        /* We lost race with folio_put() */
+                        list_del_init(&folio->_deferred_list);
                         ds_queue->split_queue_len--;
                 }
                 if (!--sc->nr_to_scan)
@@ -2899,16 +2898,15 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
         }
         spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 
-        list_for_each_safe(pos, next, &list) {
-                page = list_entry((void *)pos, struct page, deferred_list);
-                if (!trylock_page(page))
+        list_for_each_entry_safe(folio, next, &list, _deferred_list) {
+                if (!folio_trylock(folio))
                         goto next;
                 /* split_huge_page() removes page from list on success */
-                if (!split_huge_page(page))
+                if (!split_folio(folio))
                         split++;
-                unlock_page(page);
+                folio_unlock(folio);
 next:
-                put_page(page);
+                folio_put(folio);
         }
 
         spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
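
Both loops above unlink the entry they are currently standing on (via list_move(), list_del_init(), or split_folio() removing it on success), which is exactly what the _safe iterator variants exist for: the successor is read before the loop body runs. A standalone sketch of that pattern (userspace C with a minimal re-implementation of the list helpers, purely illustrative; the real definitions live in <linux/list.h>, and fake_folio is a made-up stand-in):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name)    { &(name), &(name) }
#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
/* Caches 'n' before the body runs, so the body may unlink 'pos'. */
#define list_for_each_entry_safe(pos, n, head, member)                  \
        for (pos = list_entry((head)->next, typeof(*pos), member),      \
             n = list_entry(pos->member.next, typeof(*pos), member);    \
             &pos->member != (head);                                    \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

static void list_del_init(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
        e->next = e->prev = e;
}

static void list_add_tail(struct list_head *e, struct list_head *head)
{
        e->prev = head->prev;
        e->next = head;
        head->prev->next = e;
        head->prev = e;
}

/* Made-up stand-in for a folio sitting on a deferred-split queue. */
struct fake_folio {
        int id;
        struct list_head _deferred_list;
};

int main(void)
{
        struct list_head queue = LIST_HEAD_INIT(queue);
        struct list_head scanned = LIST_HEAD_INIT(scanned);
        struct fake_folio folios[4];
        struct fake_folio *folio, *next;

        for (int i = 0; i < 4; i++) {
                folios[i].id = i;
                list_add_tail(&folios[i]._deferred_list, &queue);
        }

        /* Unlinking the current entry mid-walk is safe only because the
         * iterator already saved its successor in 'next'. */
        list_for_each_entry_safe(folio, next, &queue, _deferred_list) {
                list_del_init(&folio->_deferred_list);
                if (folio->id % 2 == 0)         /* pretend these got a ref */
                        list_add_tail(&folio->_deferred_list, &scanned);
        }

        list_for_each_entry_safe(folio, next, &scanned, _deferred_list)
                printf("moved folio %d\n", folio->id);
        return 0;
}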