Commit 6326fec1 authored by Nicholas Piggin's avatar Nicholas Piggin Committed by Linus Torvalds

mm: Use owner_priv bit for PageSwapCache, valid when PageSwapBacked

A page is not added to the swap cache without being swap backed,
so PageSwapBacked mappings can use PG_owner_priv_1 for PageSwapCache.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Bob Peterson <rpeterso@redhat.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Andrew Lutomirski <luto@kernel.org>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7c0f6ba6
...@@ -87,7 +87,6 @@ enum pageflags { ...@@ -87,7 +87,6 @@ enum pageflags {
PG_private_2, /* If pagecache, has fs aux data */ PG_private_2, /* If pagecache, has fs aux data */
PG_writeback, /* Page is under writeback */ PG_writeback, /* Page is under writeback */
PG_head, /* A head page */ PG_head, /* A head page */
PG_swapcache, /* Swap page: swp_entry_t in private */
PG_mappedtodisk, /* Has blocks allocated on-disk */ PG_mappedtodisk, /* Has blocks allocated on-disk */
PG_reclaim, /* To be reclaimed asap */ PG_reclaim, /* To be reclaimed asap */
PG_swapbacked, /* Page is backed by RAM/swap */ PG_swapbacked, /* Page is backed by RAM/swap */
...@@ -110,6 +109,9 @@ enum pageflags { ...@@ -110,6 +109,9 @@ enum pageflags {
/* Filesystems */ /* Filesystems */
PG_checked = PG_owner_priv_1, PG_checked = PG_owner_priv_1,
/* SwapBacked */
PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
/* Two page bits are conscripted by FS-Cache to maintain local caching /* Two page bits are conscripted by FS-Cache to maintain local caching
* state. These bits are set on pages belonging to the netfs's inodes * state. These bits are set on pages belonging to the netfs's inodes
* when those inodes are being locally cached. * when those inodes are being locally cached.
...@@ -314,7 +316,13 @@ PAGEFLAG_FALSE(HighMem) ...@@ -314,7 +316,13 @@ PAGEFLAG_FALSE(HighMem)
#endif #endif
#ifdef CONFIG_SWAP #ifdef CONFIG_SWAP
PAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND) static __always_inline int PageSwapCache(struct page *page)
{
return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
SETPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
#else #else
PAGEFLAG_FALSE(SwapCache) PAGEFLAG_FALSE(SwapCache)
#endif #endif
...@@ -701,12 +709,12 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) ...@@ -701,12 +709,12 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
* Flags checked when a page is freed. Pages being freed should not have * Flags checked when a page is freed. Pages being freed should not have
* these flags set. It they are, there is a problem. * these flags set. It they are, there is a problem.
*/ */
#define PAGE_FLAGS_CHECK_AT_FREE \ #define PAGE_FLAGS_CHECK_AT_FREE \
(1UL << PG_lru | 1UL << PG_locked | \ (1UL << PG_lru | 1UL << PG_locked | \
1UL << PG_private | 1UL << PG_private_2 | \ 1UL << PG_private | 1UL << PG_private_2 | \
1UL << PG_writeback | 1UL << PG_reserved | \ 1UL << PG_writeback | 1UL << PG_reserved | \
1UL << PG_slab | 1UL << PG_swapcache | 1UL << PG_active | \ 1UL << PG_slab | 1UL << PG_active | \
1UL << PG_unevictable | __PG_MLOCKED) 1UL << PG_unevictable | __PG_MLOCKED)
/* /*
* Flags checked when a page is prepped for return by the page allocator. * Flags checked when a page is prepped for return by the page allocator.
......
...@@ -95,7 +95,6 @@ ...@@ -95,7 +95,6 @@
{1UL << PG_private_2, "private_2" }, \ {1UL << PG_private_2, "private_2" }, \
{1UL << PG_writeback, "writeback" }, \ {1UL << PG_writeback, "writeback" }, \
{1UL << PG_head, "head" }, \ {1UL << PG_head, "head" }, \
{1UL << PG_swapcache, "swapcache" }, \
{1UL << PG_mappedtodisk, "mappedtodisk" }, \ {1UL << PG_mappedtodisk, "mappedtodisk" }, \
{1UL << PG_reclaim, "reclaim" }, \ {1UL << PG_reclaim, "reclaim" }, \
{1UL << PG_swapbacked, "swapbacked" }, \ {1UL << PG_swapbacked, "swapbacked" }, \
......
...@@ -764,12 +764,11 @@ static int me_huge_page(struct page *p, unsigned long pfn) ...@@ -764,12 +764,11 @@ static int me_huge_page(struct page *p, unsigned long pfn)
*/ */
#define dirty (1UL << PG_dirty) #define dirty (1UL << PG_dirty)
#define sc (1UL << PG_swapcache) #define sc ((1UL << PG_swapcache) | (1UL << PG_swapbacked))
#define unevict (1UL << PG_unevictable) #define unevict (1UL << PG_unevictable)
#define mlock (1UL << PG_mlocked) #define mlock (1UL << PG_mlocked)
#define writeback (1UL << PG_writeback) #define writeback (1UL << PG_writeback)
#define lru (1UL << PG_lru) #define lru (1UL << PG_lru)
#define swapbacked (1UL << PG_swapbacked)
#define head (1UL << PG_head) #define head (1UL << PG_head)
#define slab (1UL << PG_slab) #define slab (1UL << PG_slab)
#define reserved (1UL << PG_reserved) #define reserved (1UL << PG_reserved)
...@@ -819,7 +818,6 @@ static struct page_state { ...@@ -819,7 +818,6 @@ static struct page_state {
#undef mlock #undef mlock
#undef writeback #undef writeback
#undef lru #undef lru
#undef swapbacked
#undef head #undef head
#undef slab #undef slab
#undef reserved #undef reserved
......
...@@ -466,13 +466,15 @@ int migrate_page_move_mapping(struct address_space *mapping, ...@@ -466,13 +466,15 @@ int migrate_page_move_mapping(struct address_space *mapping,
*/ */
newpage->index = page->index; newpage->index = page->index;
newpage->mapping = page->mapping; newpage->mapping = page->mapping;
if (PageSwapBacked(page))
__SetPageSwapBacked(newpage);
get_page(newpage); /* add cache reference */ get_page(newpage); /* add cache reference */
if (PageSwapCache(page)) { if (PageSwapBacked(page)) {
SetPageSwapCache(newpage); __SetPageSwapBacked(newpage);
set_page_private(newpage, page_private(page)); if (PageSwapCache(page)) {
SetPageSwapCache(newpage);
set_page_private(newpage, page_private(page));
}
} else {
VM_BUG_ON_PAGE(PageSwapCache(page), page);
} }
/* Move dirty while page refs frozen and newpage not yet exposed */ /* Move dirty while page refs frozen and newpage not yet exposed */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment