mm/memcg: Add folio_lruvec_lock() and similar functions

These are the folio equivalents of lock_page_lruvec() and similar
functions.  Also convert lruvec_memcg_debug() to take a folio.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
parent b1baabd9
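
For callers, the conversion is mechanical: look up the folio once with
page_folio() and pass it to the folio variant, as the mm/swap.c and
mm/vmscan.c hunks below do.  A minimal before/after sketch (the
release_lru_page()/release_lru_folio() helper names are hypothetical,
not part of this patch):

	/* Before: page-based API, hypothetical caller. */
	static void release_lru_page(struct page *page)
	{
		struct lruvec *lruvec;
		unsigned long flags;

		lruvec = lock_page_lruvec_irqsave(page, &flags);
		del_page_from_lru_list(page, lruvec);
		unlock_page_lruvec_irqrestore(lruvec, flags);
	}

	/* After: obtain the folio once, then take the folio-based lock.
	 * This mirrors the __page_cache_release() hunk in mm/swap.c.
	 */
	static void release_lru_folio(struct page *page)
	{
		struct folio *folio = page_folio(page);
		struct lruvec *lruvec;
		unsigned long flags;

		lruvec = folio_lruvec_lock_irqsave(folio, &flags);
		del_page_from_lru_list(page, lruvec);
		unlock_page_lruvec_irqrestore(lruvec, flags);
	}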
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -801,15 +801,16 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
 
-struct lruvec *lock_page_lruvec(struct page *page);
-struct lruvec *lock_page_lruvec_irq(struct page *page);
-struct lruvec *lock_page_lruvec_irqsave(struct page *page,
+struct lruvec *folio_lruvec_lock(struct folio *folio);
+struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
+struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
 						unsigned long *flags);
 
 #ifdef CONFIG_DEBUG_VM
-void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page);
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
 #else
-static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
+static inline
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
 {
 }
 #endif
@@ -1261,7 +1262,8 @@ static inline struct lruvec *folio_lruvec(struct folio *folio)
 	return &pgdat->__lruvec;
 }
 
-static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
+static inline
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
 {
 }
@@ -1291,26 +1293,26 @@ static inline void mem_cgroup_put(struct mem_cgroup *memcg)
 {
 }
 
-static inline struct lruvec *lock_page_lruvec(struct page *page)
+static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
 {
-	struct pglist_data *pgdat = page_pgdat(page);
+	struct pglist_data *pgdat = folio_pgdat(folio);
 
 	spin_lock(&pgdat->__lruvec.lru_lock);
 	return &pgdat->__lruvec;
 }
 
-static inline struct lruvec *lock_page_lruvec_irq(struct page *page)
+static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
 {
-	struct pglist_data *pgdat = page_pgdat(page);
+	struct pglist_data *pgdat = folio_pgdat(folio);
 
 	spin_lock_irq(&pgdat->__lruvec.lru_lock);
 	return &pgdat->__lruvec;
 }
 
-static inline struct lruvec *lock_page_lruvec_irqsave(struct page *page,
+static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
 		unsigned long *flagsp)
 {
-	struct pglist_data *pgdat = page_pgdat(page);
+	struct pglist_data *pgdat = folio_pgdat(folio);
 
 	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
 	return &pgdat->__lruvec;
@@ -1576,6 +1578,7 @@ static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
 static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
 		struct lruvec *locked_lruvec)
 {
+	struct folio *folio = page_folio(page);
 	if (locked_lruvec) {
 		if (page_matches_lruvec(page, locked_lruvec))
 			return locked_lruvec;
@@ -1583,13 +1586,14 @@ static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
 		unlock_page_lruvec_irq(locked_lruvec);
 	}
 
-	return lock_page_lruvec_irq(page);
+	return folio_lruvec_lock_irq(folio);
 }
 
 /* Don't lock again iff page's lruvec locked */
 static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
 		struct lruvec *locked_lruvec, unsigned long *flags)
 {
+	struct folio *folio = page_folio(page);
 	if (locked_lruvec) {
 		if (page_matches_lruvec(page, locked_lruvec))
 			return locked_lruvec;
@@ -1597,7 +1601,7 @@ static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
 		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
 	}
 
-	return lock_page_lruvec_irqsave(page, flags);
+	return folio_lruvec_lock_irqsave(folio, flags);
 }
 
 #ifdef CONFIG_CGROUP_WRITEBACK
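
The relock helpers above exist for batch operations that walk pages
belonging to different lruvecs: the caller keeps one lock held across
iterations and only drops and retakes it when the lruvec changes,
passing NULL on the first pass.  A sketch of such a loop (the name
drain_lru_pages() is illustrative, not from this patch; the body
follows the pattern release_pages() uses):

	static void drain_lru_pages(struct page **pages, int nr)
	{
		struct lruvec *lruvec = NULL;
		unsigned long flags;
		int i;

		for (i = 0; i < nr; i++) {
			struct page *page = pages[i];

			/* Relocks only if page's lruvec differs from the held one. */
			lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
			del_page_from_lru_list(page, lruvec);
			__clear_page_lru_flags(page);
		}
		if (lruvec)
			unlock_page_lruvec_irqrestore(lruvec, flags);
	}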
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1032,7 +1032,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
 			locked = lruvec;
 
-			lruvec_memcg_debug(lruvec, page);
+			lruvec_memcg_debug(lruvec, page_folio(page));
 
 			/* Try get exclusive access under lock */
 			if (!skip_updated) {
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2405,7 +2405,8 @@ static void __split_huge_page_tail(struct page *head, int tail,
 static void __split_huge_page(struct page *page, struct list_head *list,
 		pgoff_t end)
 {
-	struct page *head = compound_head(page);
+	struct folio *folio = page_folio(page);
+	struct page *head = &folio->page;
 	struct lruvec *lruvec;
 	struct address_space *swap_cache = NULL;
 	unsigned long offset = 0;
@@ -2424,7 +2425,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	}
 
 	/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
-	lruvec = lock_page_lruvec(head);
+	lruvec = folio_lruvec_lock(folio);
 
 	for (i = nr - 1; i >= 1; i--) {
 		__split_huge_page_tail(head, i, lruvec, list);
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1126,67 +1126,88 @@ int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
 }
 
 #ifdef CONFIG_DEBUG_VM
-void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
 {
 	struct mem_cgroup *memcg;
 
 	if (mem_cgroup_disabled())
 		return;
 
-	memcg = page_memcg(page);
+	memcg = folio_memcg(folio);
 
 	if (!memcg)
-		VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != root_mem_cgroup, page);
+		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != root_mem_cgroup, folio);
 	else
-		VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != memcg, page);
+		VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
 }
 #endif
 
 /**
- * lock_page_lruvec - lock and return lruvec for a given page.
- * @page: the page
+ * folio_lruvec_lock - Lock the lruvec for a folio.
+ * @folio: Pointer to the folio.
  *
  * These functions are safe to use under any of the following conditions:
- * - page locked
- * - PageLRU cleared
- * - lock_page_memcg()
- * - page->_refcount is zero
+ * - folio locked
+ * - folio_test_lru false
+ * - folio_memcg_lock()
+ * - folio frozen (refcount of 0)
+ *
+ * Return: The lruvec this folio is on with its lock held.
  */
-struct lruvec *lock_page_lruvec(struct page *page)
+struct lruvec *folio_lruvec_lock(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-	struct lruvec *lruvec;
+	struct lruvec *lruvec = folio_lruvec(folio);
 
-	lruvec = folio_lruvec(folio);
 	spin_lock(&lruvec->lru_lock);
-
-	lruvec_memcg_debug(lruvec, page);
+	lruvec_memcg_debug(lruvec, folio);
 
 	return lruvec;
 }
 
-struct lruvec *lock_page_lruvec_irq(struct page *page)
+/**
+ * folio_lruvec_lock_irq - Lock the lruvec for a folio.
+ * @folio: Pointer to the folio.
+ *
+ * These functions are safe to use under any of the following conditions:
+ * - folio locked
+ * - folio_test_lru false
+ * - folio_memcg_lock()
+ * - folio frozen (refcount of 0)
+ *
+ * Return: The lruvec this folio is on with its lock held and interrupts
+ * disabled.
+ */
+struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-	struct lruvec *lruvec;
+	struct lruvec *lruvec = folio_lruvec(folio);
 
-	lruvec = folio_lruvec(folio);
 	spin_lock_irq(&lruvec->lru_lock);
-
-	lruvec_memcg_debug(lruvec, page);
+	lruvec_memcg_debug(lruvec, folio);
 
 	return lruvec;
 }
 
-struct lruvec *lock_page_lruvec_irqsave(struct page *page, unsigned long *flags)
+/**
+ * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
+ * @folio: Pointer to the folio.
+ * @flags: Pointer to irqsave flags.
+ *
+ * These functions are safe to use under any of the following conditions:
+ * - folio locked
+ * - folio_test_lru false
+ * - folio_memcg_lock()
+ * - folio frozen (refcount of 0)
+ *
+ * Return: The lruvec this folio is on with its lock held and interrupts
+ * disabled.
+ */
+struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
+		unsigned long *flags)
 {
-	struct folio *folio = page_folio(page);
-	struct lruvec *lruvec;
+	struct lruvec *lruvec = folio_lruvec(folio);
 
-	lruvec = folio_lruvec(folio);
 	spin_lock_irqsave(&lruvec->lru_lock, *flags);
-
-	lruvec_memcg_debug(lruvec, page);
+	lruvec_memcg_debug(lruvec, folio);
 
 	return lruvec;
 }
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -34,7 +34,7 @@
  *           mapping->private_lock (in __set_page_dirty_buffers)
  *             lock_page_memcg move_lock (in __set_page_dirty_buffers)
  *               i_pages lock (widely used)
- *                 lruvec->lru_lock (in lock_page_lruvec_irq)
+ *                 lruvec->lru_lock (in folio_lruvec_lock_irq)
  *           inode->i_lock (in set_page_dirty's __mark_inode_dirty)
  *           bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
  *             sb_lock (within inode_lock in fs/fs-writeback.c)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -80,10 +80,11 @@ static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
 static void __page_cache_release(struct page *page)
 {
 	if (PageLRU(page)) {
+		struct folio *folio = page_folio(page);
 		struct lruvec *lruvec;
 		unsigned long flags;
 
-		lruvec = lock_page_lruvec_irqsave(page, &flags);
+		lruvec = folio_lruvec_lock_irqsave(folio, &flags);
 		del_page_from_lru_list(page, lruvec);
 		__clear_page_lru_flags(page);
 		unlock_page_lruvec_irqrestore(lruvec, flags);
@@ -350,11 +351,12 @@ static inline void activate_page_drain(int cpu)
 static void activate_page(struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	struct lruvec *lruvec;
 
-	page = compound_head(page);
+	page = &folio->page;
 	if (TestClearPageLRU(page)) {
-		lruvec = lock_page_lruvec_irq(page);
+		lruvec = folio_lruvec_lock_irq(folio);
 		__activate_page(page, lruvec);
 		unlock_page_lruvec_irq(lruvec);
 		SetPageLRU(page);
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2090,6 +2090,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
  */
 int isolate_lru_page(struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	int ret = -EBUSY;
 
 	VM_BUG_ON_PAGE(!page_count(page), page);
@@ -2099,7 +2100,7 @@ int isolate_lru_page(struct page *page)
 		struct lruvec *lruvec;
 
 		get_page(page);
-		lruvec = lock_page_lruvec_irq(page);
+		lruvec = folio_lruvec_lock_irq(folio);
 		del_page_from_lru_list(page, lruvec);
 		unlock_page_lruvec_irq(lruvec);
 		ret = 0;
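
The safety conditions listed in the new kernel-doc are what keep the
folio's memcg, and hence the lruvec returned by folio_lruvec(), stable
between the lookup and the spin_lock().  A sketch of the
"folio_test_lru false" case, modelled on the activate_page() hunk in
mm/swap.c (activate_folio_example() is an illustrative name; it assumes
the folio flag helpers folio_test_clear_lru()/folio_set_lru() from
earlier in the folio series):

	static void activate_folio_example(struct folio *folio)
	{
		struct lruvec *lruvec;

		/* Clearing the LRU flag pins the folio's lruvec binding. */
		if (folio_test_clear_lru(folio)) {
			lruvec = folio_lruvec_lock_irq(folio);
			/* ... move the folio to the active LRU list ... */
			unlock_page_lruvec_irq(lruvec);
			folio_set_lru(folio);
		}
	}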