mm/memcg: Add folio_memcg_lock() and folio_memcg_unlock()

These are the folio equivalents of lock_page_memcg() and
unlock_page_memcg().

lock_page_memcg() and unlock_page_memcg() have too many callers to be
easily replaced in a single patch, so reimplement them as wrappers for
now to be cleaned up later when enough callers have been converted to
use folios.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
parent 9d8053fc
...@@ -978,6 +978,8 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg); ...@@ -978,6 +978,8 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
extern bool cgroup_memory_noswap; extern bool cgroup_memory_noswap;
#endif #endif
void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
void lock_page_memcg(struct page *page); void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page); void unlock_page_memcg(struct page *page);
...@@ -1397,6 +1399,14 @@ static inline void unlock_page_memcg(struct page *page) ...@@ -1397,6 +1399,14 @@ static inline void unlock_page_memcg(struct page *page)
{ {
} }
static inline void folio_memcg_lock(struct folio *folio)
{
}
static inline void folio_memcg_unlock(struct folio *folio)
{
}
static inline void mem_cgroup_handle_over_high(void) static inline void mem_cgroup_handle_over_high(void)
{ {
} }
......
...@@ -1933,18 +1933,17 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) ...@@ -1933,18 +1933,17 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
} }
/** /**
* lock_page_memcg - lock a page and memcg binding * folio_memcg_lock - Bind a folio to its memcg.
* @page: the page * @folio: The folio.
* *
* This function protects unlocked LRU pages from being moved to * This function prevents unlocked LRU folios from being moved to
* another cgroup. * another cgroup.
* *
* It ensures lifetime of the locked memcg. Caller is responsible * It ensures lifetime of the bound memcg. The caller is responsible
* for the lifetime of the page. * for the lifetime of the folio.
*/ */
void lock_page_memcg(struct page *page) void folio_memcg_lock(struct folio *folio)
{ {
struct page *head = compound_head(page); /* rmap on tail pages */
struct mem_cgroup *memcg; struct mem_cgroup *memcg;
unsigned long flags; unsigned long flags;
...@@ -1958,7 +1957,7 @@ void lock_page_memcg(struct page *page) ...@@ -1958,7 +1957,7 @@ void lock_page_memcg(struct page *page)
if (mem_cgroup_disabled()) if (mem_cgroup_disabled())
return; return;
again: again:
memcg = page_memcg(head); memcg = folio_memcg(folio);
if (unlikely(!memcg)) if (unlikely(!memcg))
return; return;
...@@ -1972,7 +1971,7 @@ void lock_page_memcg(struct page *page) ...@@ -1972,7 +1971,7 @@ void lock_page_memcg(struct page *page)
return; return;
spin_lock_irqsave(&memcg->move_lock, flags); spin_lock_irqsave(&memcg->move_lock, flags);
if (memcg != page_memcg(head)) { if (memcg != folio_memcg(folio)) {
spin_unlock_irqrestore(&memcg->move_lock, flags); spin_unlock_irqrestore(&memcg->move_lock, flags);
goto again; goto again;
} }
...@@ -1986,9 +1985,15 @@ void lock_page_memcg(struct page *page) ...@@ -1986,9 +1985,15 @@ void lock_page_memcg(struct page *page)
memcg->move_lock_task = current; memcg->move_lock_task = current;
memcg->move_lock_flags = flags; memcg->move_lock_flags = flags;
} }
EXPORT_SYMBOL(folio_memcg_lock);
void lock_page_memcg(struct page *page)
{
folio_memcg_lock(page_folio(page));
}
EXPORT_SYMBOL(lock_page_memcg); EXPORT_SYMBOL(lock_page_memcg);
static void __unlock_page_memcg(struct mem_cgroup *memcg) static void __folio_memcg_unlock(struct mem_cgroup *memcg)
{ {
if (memcg && memcg->move_lock_task == current) { if (memcg && memcg->move_lock_task == current) {
unsigned long flags = memcg->move_lock_flags; unsigned long flags = memcg->move_lock_flags;
...@@ -2003,14 +2008,22 @@ static void __unlock_page_memcg(struct mem_cgroup *memcg) ...@@ -2003,14 +2008,22 @@ static void __unlock_page_memcg(struct mem_cgroup *memcg)
} }
/** /**
* unlock_page_memcg - unlock a page and memcg binding * folio_memcg_unlock - Release the binding between a folio and its memcg.
* @page: the page * @folio: The folio.
*
* This releases the binding created by folio_memcg_lock(). This does
* not change the accounting of this folio to its memcg, but it does
* permit others to change it.
*/ */
void unlock_page_memcg(struct page *page) void folio_memcg_unlock(struct folio *folio)
{ {
struct page *head = compound_head(page); __folio_memcg_unlock(folio_memcg(folio));
}
EXPORT_SYMBOL(folio_memcg_unlock);
__unlock_page_memcg(page_memcg(head)); void unlock_page_memcg(struct page *page)
{
folio_memcg_unlock(page_folio(page));
} }
EXPORT_SYMBOL(unlock_page_memcg); EXPORT_SYMBOL(unlock_page_memcg);
...@@ -5643,7 +5656,7 @@ static int mem_cgroup_move_account(struct page *page, ...@@ -5643,7 +5656,7 @@ static int mem_cgroup_move_account(struct page *page,
page->memcg_data = (unsigned long)to; page->memcg_data = (unsigned long)to;
__unlock_page_memcg(from); __folio_memcg_unlock(from);
ret = 0; ret = 0;
nid = page_to_nid(page); nid = page_to_nid(page);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment