Commit 9202d527 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

memcg: convert mem_cgroup_swap_full() to take a folio

All callers now have a folio, so convert the function to take a folio. 
Saves a couple of calls to compound_head().

Link: https://lkml.kernel.org/r/20220902194653.1739778-48-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a160e537
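The compound_head() saving mentioned in the message comes from the helpers the function calls: page-based helpers such as PageLocked() and page_memcg() must first resolve a possible tail page to its head, whereas folio_test_locked() and folio_memcg() operate on a head page by construction. Below is a minimal userspace sketch of that pattern; the struct layouts, the lock bit, and the swap_full_page()/swap_full_folio() wrappers are simplified stand-ins for illustration, not the kernel's real definitions.

/*
 * Illustration only: simplified stand-ins for struct page / struct folio
 * showing why a folio-taking interface avoids compound_head() lookups.
 */
#include <stdbool.h>
#include <stdio.h>

struct page {
	unsigned long flags;		/* hypothetical: bit 0 = locked */
	struct page *compound_head;	/* NULL for a head page */
};

/* A folio always wraps a head (non-tail) page. */
struct folio {
	struct page page;
};

/* Page helpers must first find the head page of a compound page. */
static struct page *compound_head(struct page *page)
{
	return page->compound_head ? page->compound_head : page;
}

static bool PageLocked(struct page *page)
{
	return compound_head(page)->flags & 1UL;
}

/* Folio helper: the head is already known, so no lookup is needed. */
static bool folio_test_locked(struct folio *folio)
{
	return folio->page.flags & 1UL;
}

/* Old-style interface: each page helper pays for a compound_head() call. */
static bool swap_full_page(struct page *page)
{
	return PageLocked(page);
}

/* Folio interface, as in this commit: the lookups simply disappear. */
static bool swap_full_folio(struct folio *folio)
{
	return folio_test_locked(folio);
}

int main(void)
{
	struct folio folio = { .page = { .flags = 1UL } };

	printf("page interface : %d\n", swap_full_page(&folio.page));
	printf("folio interface: %d\n", swap_full_folio(&folio));
	return 0;
}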
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -692,7 +692,7 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_p
 }
 extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
-extern bool mem_cgroup_swap_full(struct page *page);
+extern bool mem_cgroup_swap_full(struct folio *folio);
 #else
 static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
 {
@@ -714,7 +714,7 @@ static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
 	return get_nr_swap_pages();
 }
-static inline bool mem_cgroup_swap_full(struct page *page)
+static inline bool mem_cgroup_swap_full(struct folio *folio)
 {
 	return vm_swap_full();
 }
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7406,18 +7406,18 @@ long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
 	return nr_swap_pages;
 }
-bool mem_cgroup_swap_full(struct page *page)
+bool mem_cgroup_swap_full(struct folio *folio)
 {
 	struct mem_cgroup *memcg;
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 	if (vm_swap_full())
 		return true;
 	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
 		return false;
-	memcg = page_memcg(page);
+	memcg = folio_memcg(folio);
 	if (!memcg)
 		return false;
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3647,7 +3647,7 @@ static inline bool should_try_to_free_swap(struct folio *folio,
 {
 	if (!folio_test_swapcache(folio))
 		return false;
-	if (mem_cgroup_swap_full(&folio->page) || (vma->vm_flags & VM_LOCKED) ||
+	if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
 	    folio_test_mlocked(folio))
 		return true;
 	/*
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -148,7 +148,7 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
 	if (folio_trylock(folio)) {
 		if ((flags & TTRS_ANYWAY) ||
 		    ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
-		    ((flags & TTRS_FULL) && mem_cgroup_swap_full(&folio->page)))
+		    ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)))
			ret = folio_free_swap(folio);
		folio_unlock(folio);
	}
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2047,8 +2047,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 activate_locked:
 		/* Not a candidate for swapping, so reclaim swap space. */
 		if (folio_test_swapcache(folio) &&
-		    (mem_cgroup_swap_full(&folio->page) ||
-		     folio_test_mlocked(folio)))
+		    (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio)))
			folio_free_swap(folio);
		VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
		if (!folio_test_mlocked(folio)) {