Commit 8186eb6a authored by Johannes Weiner, committed by Linus Torvalds

mm: rmap: split out page_remove_file_rmap()

page_remove_rmap() has too many branches on PageAnon() and is hard to
follow.  Move the file part into a separate function.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Vladimir Davydov <vdavydov@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d7365e78
@@ -1054,6 +1054,36 @@ void page_add_file_rmap(struct page *page)
 	mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
+static void page_remove_file_rmap(struct page *page)
+{
+	struct mem_cgroup *memcg;
+	unsigned long flags;
+	bool locked;
+
+	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+
+	/* page still mapped by someone else? */
+	if (!atomic_add_negative(-1, &page->_mapcount))
+		goto out;
+
+	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
+	if (unlikely(PageHuge(page)))
+		goto out;
+
+	/*
+	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
+	 * these counters are not modified in interrupt context, and
+	 * pte lock(a spinlock) is held, which implies preemption disabled.
+	 */
+	__dec_zone_page_state(page, NR_FILE_MAPPED);
+	mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
+
+	if (unlikely(PageMlocked(page)))
+		clear_page_mlock(page);
+out:
+	mem_cgroup_end_page_stat(memcg, locked, flags);
+}
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page: page to remove mapping from
@@ -1062,46 +1092,33 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
-	struct mem_cgroup *uninitialized_var(memcg);
-	bool anon = PageAnon(page);
-	unsigned long flags;
-	bool locked;
-
-	/*
-	 * The anon case has no mem_cgroup page_stat to update; but may
-	 * uncharge_page() below, where the lock ordering can deadlock if
-	 * we hold the lock against page_stat move: so avoid it on anon.
-	 */
-	if (!anon)
-		memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
+	if (!PageAnon(page)) {
+		page_remove_file_rmap(page);
+		return;
+	}
 
 	/* page still mapped by someone else? */
 	if (!atomic_add_negative(-1, &page->_mapcount))
-		goto out;
+		return;
+
+	/* Hugepages are not counted in NR_ANON_PAGES for now. */
+	if (unlikely(PageHuge(page)))
+		return;
 
 	/*
-	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
-	 * and not charged by memcg for now.
-	 *
 	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
 	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	if (unlikely(PageHuge(page)))
-		goto out;
-	if (anon) {
-		if (PageTransHuge(page))
-			__dec_zone_page_state(page,
-					      NR_ANON_TRANSPARENT_HUGEPAGES);
-		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-				      -hpage_nr_pages(page));
-	} else {
-		__dec_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
-	}
+	if (PageTransHuge(page))
+		__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+
+	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
+			      -hpage_nr_pages(page));
+
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
+
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
 	 * but that might overwrite a racing page_add_anon_rmap
@@ -1111,9 +1128,6 @@ void page_remove_rmap(struct page *page)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
-out:
-	if (!anon)
-		mem_cgroup_end_page_stat(memcg, locked, flags);
 }
 
 /*
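A note on the pattern, with a minimal sketch. The commit message's rationale (too many PageAnon() branches) is easiest to see in miniature. The userspace C program below is an illustrative sketch only, not kernel code: fake_page, fake_remove_rmap(), fake_remove_file_rmap(), and the two counters are hypothetical stand-ins for struct page, page_remove_rmap(), page_remove_file_rmap(), NR_FILE_MAPPED, and NR_ANON_PAGES. It also mimics the _mapcount convention the diff relies on: the count starts at -1 for an unmapped page, so atomic_add_negative(-1, &page->_mapcount) is true only when the final mapping goes away.

#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	bool anon;
	int mapcount;	/* like _mapcount: -1 means "no mappings left" */
};

static int nr_file_mapped = 1;	/* pretend the add side counted one file page */
static int nr_anon_pages = 1;	/* ...and one anon page */

/* the split-out file path: one straight-line sequence, no anon branches */
static void fake_remove_file_rmap(struct fake_page *page)
{
	/* page still mapped by someone else? */
	if (--page->mapcount >= 0)
		return;
	nr_file_mapped--;	/* stand-in for the NR_FILE_MAPPED update */
}

/* after the split, the caller branches on anon exactly once, up front */
static void fake_remove_rmap(struct fake_page *page)
{
	if (!page->anon) {
		fake_remove_file_rmap(page);
		return;
	}
	if (--page->mapcount >= 0)
		return;
	nr_anon_pages--;	/* stand-in for the NR_ANON_PAGES update */
}

int main(void)
{
	struct fake_page file = { .anon = false, .mapcount = 1 };	/* two mappings */
	struct fake_page anon = { .anon = true,  .mapcount = 0 };	/* one mapping */

	fake_remove_rmap(&file);	/* one mapping left: stats untouched */
	fake_remove_rmap(&file);	/* last mapping gone: nr_file_mapped-- */
	fake_remove_rmap(&anon);	/* last mapping gone: nr_anon_pages-- */
	printf("file_mapped=%d anon_pages=%d\n", nr_file_mapped, nr_anon_pages);
	return 0;
}

The trade-off mirrors the real commit: the mapcount check is now duplicated in both paths, but each path reads straight through, and (per the deleted comment) the file path can take the memcg page-stat lock unconditionally while the anon path, which never needed it, avoids the lock-ordering hazard entirely.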