mm/workingset: Convert workingset_activation to take a folio

This function already assumed it was being passed a head page.  No real
change here, except that thp_nr_pages() compiles away on kernels with
THP compiled out while folio_nr_pages() is always present.  Also convert
page_memcg_rcu() to folio_memcg_rcu().
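
For context, a rough sketch of that distinction, paraphrased from the kernel
headers of this era rather than quoted verbatim: with
CONFIG_TRANSPARENT_HUGEPAGE disabled, thp_nr_pages() collapses to a constant,
while folio_nr_pages() always computes the folio's real size.

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int thp_nr_pages(struct page *page)
{
	return compound_nr(page);	/* real size of the compound page */
}
#else
static inline int thp_nr_pages(struct page *page)
{
	return 1;	/* THP compiled out: treat every page as a single page */
}
#endif

/* No ifdef needed: a folio always knows how many pages it spans. */
static inline long folio_nr_pages(struct folio *folio)
{
	return compound_nr(&folio->page);
}
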
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -457,20 +457,22 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
 	return folio_memcg(page_folio(page));
 }
 
-/*
- * page_memcg_rcu - locklessly get the memory cgroup associated with a page
- * @page: a pointer to the page struct
+/**
+ * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
  *
- * Returns a pointer to the memory cgroup associated with the page,
- * or NULL. This function assumes that the page is known to have a
+ * This function assumes that the folio is known to have a
  * proper memory cgroup pointer. It's not safe to call this function
- * against some type of pages, e.g. slab pages or ex-slab pages.
+ * against some type of folios, e.g. slab folios or ex-slab folios.
+ *
+ * Return: A pointer to the memory cgroup associated with the folio,
+ * or NULL.
  */
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
 {
-	unsigned long memcg_data = READ_ONCE(page->memcg_data);
+	unsigned long memcg_data = READ_ONCE(folio->memcg_data);
 
-	VM_BUG_ON_PAGE(PageSlab(page), page);
+	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
 	if (memcg_data & MEMCG_DATA_KMEM) {
@@ -1158,7 +1160,7 @@ static inline struct mem_cgroup *page_memcg(struct page *page)
 	return NULL;
 }
 
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
 {
 	WARN_ON_ONCE(!rcu_read_lock_held());
 	return NULL;
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -330,7 +330,7 @@ static inline swp_entry_t folio_swap_entry(struct folio *folio)
 void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
 void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
 void workingset_refault(struct page *page, void *shadow);
-void workingset_activation(struct page *page);
+void workingset_activation(struct folio *folio);
 
 /* Only track the nodes of mappings with shadow entries */
 void workingset_update_node(struct xa_node *node);
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -429,7 +429,7 @@ void mark_page_accessed(struct page *page)
 		else
 			__lru_cache_activate_page(page);
 		ClearPageReferenced(page);
-		workingset_activation(page);
+		workingset_activation(page_folio(page));
 	}
 	if (page_is_idle(page))
 		clear_page_idle(page);
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -393,13 +393,11 @@ void workingset_refault(struct page *page, void *shadow)
 
 /**
  * workingset_activation - note a page activation
- * @page: page that is being activated
+ * @folio: Folio that is being activated.
  */
-void workingset_activation(struct page *page)
+void workingset_activation(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	struct mem_cgroup *memcg;
-	struct lruvec *lruvec;
 
 	rcu_read_lock();
 	/*
@@ -409,11 +407,10 @@ void workingset_activation(struct page *page)
 	 * XXX: See workingset_refault() - this should return
 	 * root_mem_cgroup even for !CONFIG_MEMCG.
 	 */
-	memcg = page_memcg_rcu(page);
+	memcg = folio_memcg_rcu(folio);
 	if (!mem_cgroup_disabled() && !memcg)
 		goto out;
-	lruvec = folio_lruvec(folio);
-	workingset_age_nonresident(lruvec, thp_nr_pages(page));
+	workingset_age_nonresident(folio_lruvec(folio), folio_nr_pages(folio));
 out:
 	rcu_read_unlock();
 }