Commit d4fc5069 authored by Matthew Wilcox, committed by Linus Torvalds

mm: switch s_mem and slab_cache in struct page

This will allow us to store slub's counters in the same bits as slab's
s_mem.  slub now needs to set page->mapping to NULL as it frees the page,
just like slab does.
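For illustration, a heavily simplified sketch of which fields share storage after the swap. The name page_sketch and the reduced member set are hypothetical; the real definition in include/linux/mm_types.h has many more members and ifdefs. The point is only that slab_cache now overlaps mapping (the slot s_mem used to occupy), while s_mem moves to the slot slab_cache previously held:

/* Illustrative sketch only -- not the kernel's struct page. */
struct address_space;
struct kmem_cache;

struct page_sketch {
	unsigned long flags;
	union {
		struct address_space *mapping;	/* pagecache / anon */
		struct kmem_cache *slab_cache;	/* after this patch; was void *s_mem */
	};
	union {
		unsigned long private;
		void *s_mem;			/* after this patch; was struct kmem_cache *slab_cache */
	};
};

Because slab_cache now aliases mapping, a slub page would otherwise be freed with a stale, non-NULL "mapping" left behind; the __free_slab() hunk below clears it, mirroring what slab's free path already does.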

Link: http://lkml.kernel.org/r/20180518194519.3820-5-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1d40a5ea
include/linux/mm_types.h
@@ -83,7 +83,7 @@ struct page {
 		/* See page-flags.h for the definition of PAGE_MAPPING_FLAGS */
 		struct address_space *mapping;
 
-		void *s_mem;			/* slab first object */
+		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
 		atomic_t compound_mapcount;	/* first tail page */
 		/* page_deferred_list().next -- second tail page */
 	};
@@ -194,7 +194,7 @@ struct page {
 			spinlock_t ptl;
 #endif
 #endif
-		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
+		void *s_mem;			/* slab first object */
 	};
 
 #ifdef CONFIG_MEMCG
mm/slub.c
@@ -1695,6 +1695,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 
 	__ClearPageSlab(page);
 	page_mapcount_reset(page);
+	page->mapping = NULL;
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
 	memcg_uncharge_slab(page, order, s);
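To make the aliasing concrete, here is a tiny standalone demo (hypothetical fake_page type, plain userspace C, nothing kernel-specific): writing one union member is visible through the other, so without the new page->mapping = NULL a freed slub page would still carry the old kmem_cache pointer where a mapping is expected.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for the two overlapping struct page fields. */
struct fake_page {
	union {
		void *mapping;
		void *slab_cache;
	};
};

int main(void)
{
	struct fake_page page = { .slab_cache = (void *)0x1000 };

	/* The members alias: reading mapping returns the slab_cache value. */
	assert(page.mapping == (void *)0x1000);

	/* Clearing mapping on free, as __free_slab() now does, removes the
	 * stale pointer before the page goes back to the page allocator. */
	page.mapping = NULL;
	assert(page.slab_cache == NULL);

	printf("union members alias as expected\n");
	return 0;
}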