Commit d44d363f authored by Shaohua Li, committed by Linus Torvalds

mm: don't assume anonymous pages have SwapBacked flag

There are a few places where the code assumes anonymous pages should have
the SwapBacked flag set.  MADV_FREE pages are anonymous pages, but we are
going to add them to the LRU_INACTIVE_FILE list and clear the SwapBacked
flag for them.  The assumption no longer holds, so fix those places.

Link: http://lkml.kernel.org/r/3945232c0df3dd6c4ef001976f35a95f18dcb407.1487965799.git.shli@fb.com
Signed-off-by: Shaohua Li <shli@fb.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a128ca71
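For context (an illustrative sketch, not part of this patch): once MADV_FREE pages sit on the LRU_INACTIVE_FILE list with SwapBacked cleared, PageAnon() no longer implies PageSwapBacked(). The resulting invariant can be summarized with the hypothetical helper below (the helper name is made up for this sketch):

/*
 * Illustrative sketch only -- this helper is not added by this commit.
 * With MADV_FREE pages kept on the LRU_INACTIVE_FILE list and their
 * SwapBacked flag cleared, "anonymous but not swap backed" identifies a
 * lazily freeable page, so callers must not assume that PageAnon(page)
 * implies PageSwapBacked(page).
 */
static inline bool page_is_lazyfree_example(struct page *page)
{
        return PageAnon(page) && !PageSwapBacked(page);
}

The hunks below drop or relax the assertions and accounting that relied on that assumption.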
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2399,7 +2399,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
         VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
         VM_BUG_ON_PAGE(!PageLocked(page), page);
-        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
         VM_BUG_ON_PAGE(!PageCompound(page), page);
 
         if (PageAnon(head)) {
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -483,8 +483,7 @@ void __khugepaged_exit(struct mm_struct *mm)
 
 static void release_pte_page(struct page *page)
 {
-        /* 0 stands for page_is_file_cache(page) == false */
-        dec_node_page_state(page, NR_ISOLATED_ANON + 0);
+        dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
         unlock_page(page);
         putback_lru_page(page);
 }
@@ -532,7 +531,6 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 
                 VM_BUG_ON_PAGE(PageCompound(page), page);
                 VM_BUG_ON_PAGE(!PageAnon(page), page);
-                VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
                 /*
                  * We can do it before isolate_lru_page because the
@@ -579,8 +577,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                         result = SCAN_DEL_PAGE_LRU;
                         goto out;
                 }
-                /* 0 stands for page_is_file_cache(page) == false */
-                inc_node_page_state(page, NR_ISOLATED_ANON + 0);
+                inc_node_page_state(page,
+                                NR_ISOLATED_ANON + page_is_file_cache(page));
                 VM_BUG_ON_PAGE(!PageLocked(page), page);
                 VM_BUG_ON_PAGE(PageLRU(page), page);
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1944,7 +1944,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 
         /* Prepare a page as a migration target */
         __SetPageLocked(new_page);
-        __SetPageSwapBacked(new_page);
+        if (PageSwapBacked(page))
+                __SetPageSwapBacked(new_page);
 
         /* anon mapping, we can simply copy page->mapping to the new page: */
         new_page->mapping = page->mapping;
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1424,7 +1424,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                          * Store the swap location in the pte.
                          * See handle_pte_fault() ...
                          */
-                        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+                        VM_BUG_ON_PAGE(!PageSwapCache(page) && PageSwapBacked(page),
+                                page);
 
                         if (!PageDirty(page)) {
                                 /* It's a freeable page by MADV_FREE */