mm/memcg: Convert mem_cgroup_migrate() to take folios

Convert all callers of mem_cgroup_migrate() to call page_folio() first.
They all look like they're using head pages already, but this proves it.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
parent bbc6b703
...@@ -745,7 +745,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list) ...@@ -745,7 +745,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
__mem_cgroup_uncharge_list(page_list); __mem_cgroup_uncharge_list(page_list);
} }
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage); void mem_cgroup_migrate(struct folio *old, struct folio *new);
/** /**
* mem_cgroup_lruvec - get the lru list vector for a memcg & node * mem_cgroup_lruvec - get the lru list vector for a memcg & node
...@@ -1244,7 +1244,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list) ...@@ -1244,7 +1244,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{ {
} }
static inline void mem_cgroup_migrate(struct page *old, struct page *new) static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{ {
} }
......
...@@ -835,6 +835,8 @@ EXPORT_SYMBOL(file_write_and_wait_range); ...@@ -835,6 +835,8 @@ EXPORT_SYMBOL(file_write_and_wait_range);
*/ */
void replace_page_cache_page(struct page *old, struct page *new) void replace_page_cache_page(struct page *old, struct page *new)
{ {
struct folio *fold = page_folio(old);
struct folio *fnew = page_folio(new);
struct address_space *mapping = old->mapping; struct address_space *mapping = old->mapping;
void (*freepage)(struct page *) = mapping->a_ops->freepage; void (*freepage)(struct page *) = mapping->a_ops->freepage;
pgoff_t offset = old->index; pgoff_t offset = old->index;
...@@ -848,7 +850,7 @@ void replace_page_cache_page(struct page *old, struct page *new) ...@@ -848,7 +850,7 @@ void replace_page_cache_page(struct page *old, struct page *new)
new->mapping = mapping; new->mapping = mapping;
new->index = offset; new->index = offset;
mem_cgroup_migrate(old, new); mem_cgroup_migrate(fold, fnew);
xas_lock_irq(&xas); xas_lock_irq(&xas);
xas_store(&xas, new); xas_store(&xas, new);
......
...@@ -6891,36 +6891,35 @@ void __mem_cgroup_uncharge_list(struct list_head *page_list) ...@@ -6891,36 +6891,35 @@ void __mem_cgroup_uncharge_list(struct list_head *page_list)
} }
/** /**
* mem_cgroup_migrate - charge a page's replacement * mem_cgroup_migrate - Charge a folio's replacement.
* @oldpage: currently circulating page * @old: Currently circulating folio.
* @newpage: replacement page * @new: Replacement folio.
* *
* Charge @newpage as a replacement page for @oldpage. @oldpage will * Charge @new as a replacement folio for @old. @old will
* be uncharged upon free. * be uncharged upon free.
* *
* Both pages must be locked, @newpage->mapping must be set up. * Both folios must be locked, @new->mapping must be set up.
*/ */
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) void mem_cgroup_migrate(struct folio *old, struct folio *new)
{ {
struct folio *newfolio = page_folio(newpage);
struct mem_cgroup *memcg; struct mem_cgroup *memcg;
long nr_pages = folio_nr_pages(newfolio); long nr_pages = folio_nr_pages(new);
unsigned long flags; unsigned long flags;
VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio); VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
VM_BUG_ON_FOLIO(PageAnon(oldpage) != folio_test_anon(newfolio), newfolio); VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
VM_BUG_ON_FOLIO(compound_nr(oldpage) != nr_pages, newfolio); VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
if (mem_cgroup_disabled()) if (mem_cgroup_disabled())
return; return;
/* Page cache replacement: new page already charged? */ /* Page cache replacement: new folio already charged? */
if (folio_memcg(newfolio)) if (folio_memcg(new))
return; return;
memcg = page_memcg(oldpage); memcg = folio_memcg(old);
VM_WARN_ON_ONCE_PAGE(!memcg, oldpage); VM_WARN_ON_ONCE_FOLIO(!memcg, old);
if (!memcg) if (!memcg)
return; return;
...@@ -6932,11 +6931,11 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) ...@@ -6932,11 +6931,11 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
} }
css_get(&memcg->css); css_get(&memcg->css);
commit_charge(newfolio, memcg); commit_charge(new, memcg);
local_irq_save(flags); local_irq_save(flags);
mem_cgroup_charge_statistics(memcg, nr_pages); mem_cgroup_charge_statistics(memcg, nr_pages);
memcg_check_events(memcg, page_to_nid(newpage)); memcg_check_events(memcg, folio_nid(new));
local_irq_restore(flags); local_irq_restore(flags);
} }
......
...@@ -542,6 +542,8 @@ int migrate_huge_page_move_mapping(struct address_space *mapping, ...@@ -542,6 +542,8 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
*/ */
void migrate_page_states(struct page *newpage, struct page *page) void migrate_page_states(struct page *newpage, struct page *page)
{ {
struct folio *folio = page_folio(page);
struct folio *newfolio = page_folio(newpage);
int cpupid; int cpupid;
if (PageError(page)) if (PageError(page))
...@@ -609,7 +611,7 @@ void migrate_page_states(struct page *newpage, struct page *page) ...@@ -609,7 +611,7 @@ void migrate_page_states(struct page *newpage, struct page *page)
copy_page_owner(page, newpage); copy_page_owner(page, newpage);
if (!PageHuge(page)) if (!PageHuge(page))
mem_cgroup_migrate(page, newpage); mem_cgroup_migrate(folio, newfolio);
} }
EXPORT_SYMBOL(migrate_page_states); EXPORT_SYMBOL(migrate_page_states);
......
...@@ -1637,6 +1637,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, ...@@ -1637,6 +1637,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index) struct shmem_inode_info *info, pgoff_t index)
{ {
struct page *oldpage, *newpage; struct page *oldpage, *newpage;
struct folio *old, *new;
struct address_space *swap_mapping; struct address_space *swap_mapping;
swp_entry_t entry; swp_entry_t entry;
pgoff_t swap_index; pgoff_t swap_index;
...@@ -1673,7 +1674,9 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, ...@@ -1673,7 +1674,9 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
xa_lock_irq(&swap_mapping->i_pages); xa_lock_irq(&swap_mapping->i_pages);
error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage); error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
if (!error) { if (!error) {
mem_cgroup_migrate(oldpage, newpage); old = page_folio(oldpage);
new = page_folio(newpage);
mem_cgroup_migrate(old, new);
__inc_lruvec_page_state(newpage, NR_FILE_PAGES); __inc_lruvec_page_state(newpage, NR_FILE_PAGES);
__dec_lruvec_page_state(oldpage, NR_FILE_PAGES); __dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment