Commit d9eb1ea2 authored by Johannes Weiner's avatar Johannes Weiner Committed by Linus Torvalds

mm: memcontrol: delete unused lrucare handling

Swapin faults were the last event to charge pages after they had already
been put on the LRU list.  Now that we charge directly on swapin, the
lrucare portion of the charge code is unused.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Shakeel Butt <shakeelb@google.com>
Link: http://lkml.kernel.org/r/20200508183105.225460-19-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0a27cae1
...@@ -355,8 +355,7 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg, ...@@ -355,8 +355,7 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
struct mem_cgroup *memcg); struct mem_cgroup *memcg);
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask, int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
bool lrucare);
void mem_cgroup_uncharge(struct page *page); void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list); void mem_cgroup_uncharge_list(struct list_head *page_list);
...@@ -839,7 +838,7 @@ static inline enum mem_cgroup_protection mem_cgroup_protected( ...@@ -839,7 +838,7 @@ static inline enum mem_cgroup_protection mem_cgroup_protected(
} }
static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm, static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, bool lrucare) gfp_t gfp_mask)
{ {
return 0; return 0;
} }
......
...@@ -167,8 +167,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, ...@@ -167,8 +167,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
addr + PAGE_SIZE); addr + PAGE_SIZE);
if (new_page) { if (new_page) {
err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL, err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL);
false);
if (err) if (err)
return err; return err;
} }
......
...@@ -845,7 +845,7 @@ static int __add_to_page_cache_locked(struct page *page, ...@@ -845,7 +845,7 @@ static int __add_to_page_cache_locked(struct page *page,
page->index = offset; page->index = offset;
if (!huge) { if (!huge) {
error = mem_cgroup_charge(page, current->mm, gfp_mask, false); error = mem_cgroup_charge(page, current->mm, gfp_mask);
if (error) if (error)
goto error; goto error;
} }
......
...@@ -593,7 +593,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, ...@@ -593,7 +593,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
VM_BUG_ON_PAGE(!PageCompound(page), page); VM_BUG_ON_PAGE(!PageCompound(page), page);
if (mem_cgroup_charge(page, vma->vm_mm, gfp, false)) { if (mem_cgroup_charge(page, vma->vm_mm, gfp)) {
put_page(page); put_page(page);
count_vm_event(THP_FAULT_FALLBACK); count_vm_event(THP_FAULT_FALLBACK);
count_vm_event(THP_FAULT_FALLBACK_CHARGE); count_vm_event(THP_FAULT_FALLBACK_CHARGE);
......
...@@ -1059,7 +1059,7 @@ static void collapse_huge_page(struct mm_struct *mm, ...@@ -1059,7 +1059,7 @@ static void collapse_huge_page(struct mm_struct *mm,
goto out_nolock; goto out_nolock;
} }
if (unlikely(mem_cgroup_charge(new_page, mm, gfp, false))) { if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
result = SCAN_CGROUP_CHARGE_FAIL; result = SCAN_CGROUP_CHARGE_FAIL;
goto out_nolock; goto out_nolock;
} }
...@@ -1632,7 +1632,7 @@ static void collapse_file(struct mm_struct *mm, ...@@ -1632,7 +1632,7 @@ static void collapse_file(struct mm_struct *mm,
goto out; goto out;
} }
if (unlikely(mem_cgroup_charge(new_page, mm, gfp, false))) { if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
result = SCAN_CGROUP_CHARGE_FAIL; result = SCAN_CGROUP_CHARGE_FAIL;
goto out; goto out;
} }
......
...@@ -2655,51 +2655,9 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) ...@@ -2655,51 +2655,9 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
} }
#endif #endif
static void lock_page_lru(struct page *page, int *isolated) static void commit_charge(struct page *page, struct mem_cgroup *memcg)
{ {
pg_data_t *pgdat = page_pgdat(page);
spin_lock_irq(&pgdat->lru_lock);
if (PageLRU(page)) {
struct lruvec *lruvec;
lruvec = mem_cgroup_page_lruvec(page, pgdat);
ClearPageLRU(page);
del_page_from_lru_list(page, lruvec, page_lru(page));
*isolated = 1;
} else
*isolated = 0;
}
static void unlock_page_lru(struct page *page, int isolated)
{
pg_data_t *pgdat = page_pgdat(page);
if (isolated) {
struct lruvec *lruvec;
lruvec = mem_cgroup_page_lruvec(page, pgdat);
VM_BUG_ON_PAGE(PageLRU(page), page);
SetPageLRU(page);
add_page_to_lru_list(page, lruvec, page_lru(page));
}
spin_unlock_irq(&pgdat->lru_lock);
}
static void commit_charge(struct page *page, struct mem_cgroup *memcg,
bool lrucare)
{
int isolated;
VM_BUG_ON_PAGE(page->mem_cgroup, page); VM_BUG_ON_PAGE(page->mem_cgroup, page);
/*
* In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
* may already be on some other mem_cgroup's LRU. Take care of it.
*/
if (lrucare)
lock_page_lru(page, &isolated);
/* /*
* Nobody should be changing or seriously looking at * Nobody should be changing or seriously looking at
* page->mem_cgroup at this point: * page->mem_cgroup at this point:
...@@ -2715,9 +2673,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, ...@@ -2715,9 +2673,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
* have the page locked * have the page locked
*/ */
page->mem_cgroup = memcg; page->mem_cgroup = memcg;
if (lrucare)
unlock_page_lru(page, isolated);
} }
#ifdef CONFIG_MEMCG_KMEM #ifdef CONFIG_MEMCG_KMEM
...@@ -6503,22 +6458,18 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, ...@@ -6503,22 +6458,18 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
* @page: page to charge * @page: page to charge
* @mm: mm context of the victim * @mm: mm context of the victim
* @gfp_mask: reclaim mode * @gfp_mask: reclaim mode
* @lrucare: page might be on the LRU already
* *
* Try to charge @page to the memcg that @mm belongs to, reclaiming * Try to charge @page to the memcg that @mm belongs to, reclaiming
* pages according to @gfp_mask if necessary. * pages according to @gfp_mask if necessary.
* *
* Returns 0 on success. Otherwise, an error code is returned. * Returns 0 on success. Otherwise, an error code is returned.
*/ */
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask, int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
bool lrucare)
{ {
unsigned int nr_pages = hpage_nr_pages(page); unsigned int nr_pages = hpage_nr_pages(page);
struct mem_cgroup *memcg = NULL; struct mem_cgroup *memcg = NULL;
int ret = 0; int ret = 0;
VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
if (mem_cgroup_disabled()) if (mem_cgroup_disabled())
goto out; goto out;
...@@ -6552,7 +6503,7 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask, ...@@ -6552,7 +6503,7 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask,
if (ret) if (ret)
goto out_put; goto out_put;
commit_charge(page, memcg, lrucare); commit_charge(page, memcg);
local_irq_disable(); local_irq_disable();
mem_cgroup_charge_statistics(memcg, page, nr_pages); mem_cgroup_charge_statistics(memcg, page, nr_pages);
...@@ -6753,7 +6704,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) ...@@ -6753,7 +6704,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
page_counter_charge(&memcg->memsw, nr_pages); page_counter_charge(&memcg->memsw, nr_pages);
css_get_many(&memcg->css, nr_pages); css_get_many(&memcg->css, nr_pages);
commit_charge(newpage, memcg, false); commit_charge(newpage, memcg);
local_irq_save(flags); local_irq_save(flags);
mem_cgroup_charge_statistics(memcg, newpage, nr_pages); mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
......
...@@ -2675,7 +2675,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) ...@@ -2675,7 +2675,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
} }
} }
if (mem_cgroup_charge(new_page, mm, GFP_KERNEL, false)) if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
goto oom_free_new; goto oom_free_new;
cgroup_throttle_swaprate(new_page, GFP_KERNEL); cgroup_throttle_swaprate(new_page, GFP_KERNEL);
...@@ -3134,7 +3134,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) ...@@ -3134,7 +3134,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
/* Tell memcg to use swap ownership records */ /* Tell memcg to use swap ownership records */
SetPageSwapCache(page); SetPageSwapCache(page);
err = mem_cgroup_charge(page, vma->vm_mm, err = mem_cgroup_charge(page, vma->vm_mm,
GFP_KERNEL, false); GFP_KERNEL);
ClearPageSwapCache(page); ClearPageSwapCache(page);
if (err) if (err)
goto out_page; goto out_page;
...@@ -3358,7 +3358,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) ...@@ -3358,7 +3358,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
if (!page) if (!page)
goto oom; goto oom;
if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL, false)) if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
goto oom_free_page; goto oom_free_page;
cgroup_throttle_swaprate(page, GFP_KERNEL); cgroup_throttle_swaprate(page, GFP_KERNEL);
...@@ -3854,7 +3854,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf) ...@@ -3854,7 +3854,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
if (!vmf->cow_page) if (!vmf->cow_page)
return VM_FAULT_OOM; return VM_FAULT_OOM;
if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL, false)) { if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
put_page(vmf->cow_page); put_page(vmf->cow_page);
return VM_FAULT_OOM; return VM_FAULT_OOM;
} }
......
...@@ -2786,7 +2786,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, ...@@ -2786,7 +2786,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
if (unlikely(anon_vma_prepare(vma))) if (unlikely(anon_vma_prepare(vma)))
goto abort; goto abort;
if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL, false)) if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
goto abort; goto abort;
/* /*
......
...@@ -624,7 +624,7 @@ static int shmem_add_to_page_cache(struct page *page, ...@@ -624,7 +624,7 @@ static int shmem_add_to_page_cache(struct page *page,
page->index = index; page->index = index;
if (!PageSwapCache(page)) { if (!PageSwapCache(page)) {
error = mem_cgroup_charge(page, charge_mm, gfp, false); error = mem_cgroup_charge(page, charge_mm, gfp);
if (error) { if (error) {
if (PageTransHuge(page)) { if (PageTransHuge(page)) {
count_vm_event(THP_FILE_FALLBACK); count_vm_event(THP_FILE_FALLBACK);
......
...@@ -435,7 +435,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, ...@@ -435,7 +435,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
goto fail_unlock; goto fail_unlock;
} }
if (mem_cgroup_charge(page, NULL, gfp_mask, false)) { if (mem_cgroup_charge(page, NULL, gfp_mask)) {
delete_from_swap_cache(page); delete_from_swap_cache(page);
goto fail_unlock; goto fail_unlock;
} }
......
...@@ -96,7 +96,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm, ...@@ -96,7 +96,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
__SetPageUptodate(page); __SetPageUptodate(page);
ret = -ENOMEM; ret = -ENOMEM;
if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL, false)) if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
goto out_release; goto out_release;
_dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot)); _dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment