Commit 6e292b9b authored by Matthew Wilcox, committed by Linus Torvalds

mm: split page_type out from _mapcount

We're already using a union of many fields here, so stop abusing the
_mapcount and make page_type its own field.  That implies renaming some of
the machinery that creates PageBuddy, PageBalloon and PageKmemcg; bring
back the PG_buddy, PG_balloon and PG_kmemcg names.

As suggested by Kirill, make page_type a bitmask.  Because it starts out
life as -1 (thanks to sharing the storage with _mapcount), setting a page
flag means clearing the appropriate bit.  This gives us space for probably
twenty or so extra bits (depending how paranoid we want to be about
_mapcount underflow).
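
To make the inverted sense concrete, here is a minimal userspace sketch
(illustrative only, not kernel code) of the scheme, using the
PAGE_TYPE_BASE and PG_buddy values this patch adds; PageTypeSet() adapts
the patch's PageType() macro to take a raw value instead of a struct page:

	#include <assert.h>

	#define PAGE_TYPE_BASE	0xf0000000
	#define PG_buddy	0x00000080

	/* The type matches iff the flag bit is CLEAR while the base bits are set. */
	#define PageTypeSet(val, flag) \
		(((val) & (PAGE_TYPE_BASE | (flag))) == PAGE_TYPE_BASE)

	int main(void)
	{
		unsigned int page_type = -1;	/* shares storage with _mapcount == -1 */

		assert(!PageTypeSet(page_type, PG_buddy));
		page_type &= ~PG_buddy;		/* __SetPageBuddy clears the bit */
		assert(PageTypeSet(page_type, PG_buddy));
		page_type |= PG_buddy;		/* __ClearPageBuddy sets the bit */
		assert(!PageTypeSet(page_type, PG_buddy));
		return 0;
	}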

Link: http://lkml.kernel.org/r/20180518194519.3820-3-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 620b4e90
include/linux/mm_types.h

@@ -96,6 +96,14 @@ struct page {
 	};
 
 	union {
+		/*
+		 * If the page is neither PageSlab nor mappable to userspace,
+		 * the value stored here may help determine what this page
+		 * is used for.  See page-flags.h for a list of page types
+		 * which are currently stored here.
+		 */
+		unsigned int page_type;
+
 		_slub_counter_t counters;
 		unsigned int active;		/* SLAB */
 		struct {			/* SLUB */
@@ -109,11 +117,6 @@ struct page {
 	union {
 		/*
 		 * Count of ptes mapped in mms, to show when
 		 * page is mapped & limit reverse map searches.
-		 *
-		 * Extra information about page type may be
-		 * stored here for pages that are never mapped,
-		 * in which case the value MUST BE <= -2.
-		 * See page-flags.h for more details.
 		 */
 		atomic_t _mapcount;
include/linux/page-flags.h

@@ -642,49 +642,56 @@ PAGEFLAG_FALSE(DoubleMap)
 #endif
 
 /*
- * For pages that are never mapped to userspace, page->mapcount may be
- * used for storing extra information about page type. Any value used
- * for this purpose must be <= -2, but it's better start not too close
- * to -2 so that an underflow of the page_mapcount() won't be mistaken
- * for a special page.
+ * For pages that are never mapped to userspace (and aren't PageSlab),
+ * page_type may be used.  Because it is initialised to -1, we invert the
+ * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
+ * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
+ * low bits so that an underflow or overflow of page_mapcount() won't be
+ * mistaken for a page type value.
  */
-#define PAGE_MAPCOUNT_OPS(uname, lname)					\
+
+#define PAGE_TYPE_BASE	0xf0000000
+/* Reserve		0x0000007f to catch underflows of page_mapcount */
+#define PG_buddy	0x00000080
+#define PG_balloon	0x00000100
+#define PG_kmemcg	0x00000200
+
+#define PageType(page, flag)						\
+	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
+
+#define PAGE_TYPE_OPS(uname, lname)					\
 static __always_inline int Page##uname(struct page *page)		\
 {									\
-	return atomic_read(&page->_mapcount) ==				\
-				PAGE_##lname##_MAPCOUNT_VALUE;		\
+	return PageType(page, PG_##lname);				\
 }									\
 static __always_inline void __SetPage##uname(struct page *page)	\
 {									\
-	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);	\
-	atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE);	\
+	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
+	page->page_type &= ~PG_##lname;					\
 }									\
 static __always_inline void __ClearPage##uname(struct page *page)	\
 {									\
 	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
-	atomic_set(&page->_mapcount, -1);				\
+	page->page_type |= PG_##lname;					\
 }
 
 /*
- * PageBuddy() indicate that the page is free and in the buddy system
+ * PageBuddy() indicates that the page is free and in the buddy system
  * (see mm/page_alloc.c).
  */
-#define PAGE_BUDDY_MAPCOUNT_VALUE		(-128)
-PAGE_MAPCOUNT_OPS(Buddy, BUDDY)
+PAGE_TYPE_OPS(Buddy, buddy)
 
 /*
- * PageBalloon() is set on pages that are on the balloon page list
+ * PageBalloon() is true for pages that are on the balloon page list
  * (see mm/balloon_compaction.c).
  */
-#define PAGE_BALLOON_MAPCOUNT_VALUE		(-256)
-PAGE_MAPCOUNT_OPS(Balloon, BALLOON)
+PAGE_TYPE_OPS(Balloon, balloon)
 
 /*
  * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
  * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
  */
-#define PAGE_KMEMCG_MAPCOUNT_VALUE		(-512)
-PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG)
+PAGE_TYPE_OPS(Kmemcg, kmemcg)
 
 extern bool is_free_buddy_page(struct page *page);
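
The reserved low bits above are what give _mapcount its underflow margin:
any value from -2 down to -128 still has bit 0x80 (PG_buddy) set, so it
cannot satisfy the PageType() test. A standalone check of that arithmetic
(illustrative, not part of the patch):

	#include <assert.h>

	#define PAGE_TYPE_BASE	0xf0000000
	#define PG_buddy	0x00000080

	int main(void)
	{
		/* Underflowed mapcounts down to -128 never look like PageBuddy. */
		for (int i = 2; i <= 128; i++)
			assert(((unsigned int)-i & (PAGE_TYPE_BASE | PG_buddy))
			       != PAGE_TYPE_BASE);
		/* A real buddy page is -1 with PG_buddy cleared. */
		assert((((unsigned int)-1 & ~PG_buddy) & (PAGE_TYPE_BASE | PG_buddy))
		       == PAGE_TYPE_BASE);
		return 0;
	}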
kernel/crash_core.c

@@ -460,6 +460,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_NUMBER(PG_hwpoison);
 #endif
 	VMCOREINFO_NUMBER(PG_head_mask);
+#define PAGE_BUDDY_MAPCOUNT_VALUE	(~PG_buddy)
 	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
 #ifdef CONFIG_HUGETLB_PAGE
 	VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
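
Defining PAGE_BUDDY_MAPCOUNT_VALUE as ~PG_buddy keeps the number exported
to crash tools equal to what a buddy page's page_type now actually holds:
starting from -1 and clearing PG_buddy yields exactly ~PG_buddy
(0xffffff7f). A quick sanity check of that identity (illustrative only):

	#include <assert.h>

	#define PG_buddy	0x00000080

	int main(void)
	{
		unsigned int page_type = -1;	/* initial page_type */

		page_type &= ~PG_buddy;		/* __SetPageBuddy */
		assert(page_type == (unsigned int)~PG_buddy);
		return 0;
	}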
mm/page_alloc.c

@@ -705,16 +705,14 @@ static inline void rmv_page_order(struct page *page)
 
 /*
  * This function checks whether a page is free && is the buddy
- * we can do coalesce a page and its buddy if
+ * we can coalesce a page and its buddy if
  * (a) the buddy is not in a hole (check before calling!) &&
  * (b) the buddy is in the buddy system &&
  * (c) a page and its buddy have the same order &&
  * (d) a page and its buddy are in the same zone.
  *
- * For recording whether a page is in the buddy system, we set ->_mapcount
- * PAGE_BUDDY_MAPCOUNT_VALUE.
- * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
- * serialized by zone->lock.
+ * For recording whether a page is in the buddy system, we set PageBuddy.
+ * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
  *
  * For recording page's order, we use page_private(page).
  */
@@ -759,9 +757,8 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  * as necessary, plus some accounting needed to play nicely with other
  * parts of the VM system.
  * At each level, we keep a list of pages, which are heads of continuous
- * free pages of length of (1 << order) and marked with _mapcount
- * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
- * field.
+ * free pages of length of (1 << order) and marked with PageBuddy.
+ * Page's order is recorded in page_private(page) field.
  * So when we are allocating or freeing one, we can derive the state of the
  * other. That is, if we allocate a small block, and both were
  * free, the remainder of the region must be split into blocks.
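
For illustration of how "we can derive the state of the other": a block's
buddy at a given order differs from it only in the bit selected by that
order, so the buddy's position is a single XOR away. A standalone sketch
of that arithmetic (the helper name here is illustrative, not the kernel's):

	#include <assert.h>

	/* A block's buddy at 'order' differs only in bit 'order' of its PFN. */
	static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
	{
		return pfn ^ (1UL << order);
	}

	int main(void)
	{
		assert(find_buddy_pfn(8, 2) == 12);	/* [8..11] pairs with [12..15] */
		assert(find_buddy_pfn(12, 2) == 8);	/* the relation is symmetric */
		assert(find_buddy_pfn(8, 3) == 0);	/* the merged block's buddy */
		return 0;
	}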
scripts/tags.sh

@@ -179,9 +179,9 @@ regex_c=(
 	'/\<CLEARPAGEFLAG_NOOP(\([[:alnum:]_]*\).*/ClearPage\1/'
 	'/\<__CLEARPAGEFLAG_NOOP(\([[:alnum:]_]*\).*/__ClearPage\1/'
 	'/\<TESTCLEARFLAG_FALSE(\([[:alnum:]_]*\).*/TestClearPage\1/'
-	'/^PAGE_MAPCOUNT_OPS(\([[:alnum:]_]*\).*/Page\1/'
-	'/^PAGE_MAPCOUNT_OPS(\([[:alnum:]_]*\).*/__SetPage\1/'
-	'/^PAGE_MAPCOUNT_OPS(\([[:alnum:]_]*\).*/__ClearPage\1/'
+	'/^PAGE_TYPE_OPS(\([[:alnum:]_]*\).*/Page\1/'
+	'/^PAGE_TYPE_OPS(\([[:alnum:]_]*\).*/__SetPage\1/'
+	'/^PAGE_TYPE_OPS(\([[:alnum:]_]*\).*/__ClearPage\1/'
 	'/^TASK_PFA_TEST([^,]*, *\([[:alnum:]_]*\))/task_\1/'
 	'/^TASK_PFA_SET([^,]*, *\([[:alnum:]_]*\))/task_set_\1/'
 	'/^TASK_PFA_CLEAR([^,]*, *\([[:alnum:]_]*\))/task_clear_\1/'
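
Each of these regexes turns a PAGE_TYPE_OPS() invocation into tags for the
functions the macro generates. Since they are basic regular expressions,
one can be tried directly with sed (an illustrative shell one-liner, not
part of the patch):

	$ echo 'PAGE_TYPE_OPS(Buddy, buddy)' | \
	  sed -n 's/^PAGE_TYPE_OPS(\([[:alnum:]_]*\).*/Page\1/p'
	PageBuddy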