Commit 50757018 authored by Matthew Wilcox (Oracle), committed by Vlastimil Babka

mm/slob: Convert SLOB to use struct slab and struct folio

Use struct slab throughout the slob allocator. Where a non-slab page can
appear, use struct folio instead of struct page.

[ vbabka@suse.cz: don't introduce wrappers for PageSlobFree in mm/slab.h
  just for the single callers being wrappers in mm/slob.c ]

[ Hyeonggon Yoo <42.hyeyoo@gmail.com>: fix NULL pointer dereference ]
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
parent 4b5f8d9a
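The conversion leans on the struct slab / struct folio helpers from mm/slab.h that appear throughout the hunks below: folio_slab(), slab_folio(), slab_page(), virt_to_slab(), slab_nid(), together with virt_to_folio() and folio_test_slab(). Because struct slab and struct folio are both overlays of struct page, these conversions are purely type changes. As a rough, simplified sketch of what the helpers boil down to (illustration only; the real mm/slab.h definitions are more careful, e.g. about const-ness):

/* Simplified illustration only; see mm/slab.h for the real definitions. */
#define folio_slab(folio)	((struct slab *)(folio))	/* same memory, slab view  */
#define slab_folio(slab)	((struct folio *)(slab))	/* same memory, folio view */
#define slab_page(slab)		folio_page(slab_folio(slab), 0)

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	/* Only memory owned by the slab allocator has a meaningful slab view. */
	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}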
mm/slob.c
@@ -30,7 +30,7 @@
  * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
  * alloc_pages() directly, allocating compound pages so the page order
  * does not have to be separately tracked.
- * These objects are detected in kfree() because PageSlab()
+ * These objects are detected in kfree() because folio_test_slab()
  * is false for them.
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
@@ -105,21 +105,21 @@ static LIST_HEAD(free_slob_large);
 /*
  * slob_page_free: true for pages on free_slob_pages list.
  */
-static inline int slob_page_free(struct page *sp)
+static inline int slob_page_free(struct slab *slab)
 {
-	return PageSlobFree(sp);
+	return PageSlobFree(slab_page(slab));
 }

-static void set_slob_page_free(struct page *sp, struct list_head *list)
+static void set_slob_page_free(struct slab *slab, struct list_head *list)
 {
-	list_add(&sp->slab_list, list);
-	__SetPageSlobFree(sp);
+	list_add(&slab->slab_list, list);
+	__SetPageSlobFree(slab_page(slab));
 }

-static inline void clear_slob_page_free(struct page *sp)
+static inline void clear_slob_page_free(struct slab *slab)
 {
-	list_del(&sp->slab_list);
-	__ClearPageSlobFree(sp);
+	list_del(&slab->slab_list);
+	__ClearPageSlobFree(slab_page(slab));
 }

 #define SLOB_UNIT sizeof(slob_t)
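Per the bracketed note in the commit message, no slab-typed wrappers for the SlobFree page flag were added to mm/slab.h, since slob_page_free(), set_slob_page_free() and clear_slob_page_free() above are the only would-be callers; they simply convert back to the underlying struct page at the call site. Had such wrappers been introduced, they would presumably have looked roughly like this (hypothetical code, not part of the commit):

/* Hypothetical mm/slab.h wrappers that this commit deliberately does NOT add. */
static inline int slab_test_slob_free(struct slab *slab)
{
	return PageSlobFree(slab_page(slab));
}

static inline void __slab_set_slob_free(struct slab *slab)
{
	__SetPageSlobFree(slab_page(slab));
}

static inline void __slab_clear_slob_free(struct slab *slab)
{
	__ClearPageSlobFree(slab_page(slab));
}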
@@ -234,7 +234,7 @@ static void slob_free_pages(void *b, int order)
  * freelist, in this case @page_removed_from_list will be set to
  * true (set to false otherwise).
  */
-static void *slob_page_alloc(struct page *sp, size_t size, int align,
+static void *slob_page_alloc(struct slab *sp, size_t size, int align,
 			      int align_offset, bool *page_removed_from_list)
 {
 	slob_t *prev, *cur, *aligned = NULL;
@@ -301,7 +301,8 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align,
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 							int align_offset)
 {
-	struct page *sp;
+	struct folio *folio;
+	struct slab *sp;
 	struct list_head *slob_list;
 	slob_t *b = NULL;
 	unsigned long flags;
@@ -323,7 +324,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 		 * If there's a node specification, search for a partial
 		 * page with a matching node id in the freelist.
 		 */
-		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
+		if (node != NUMA_NO_NODE && slab_nid(sp) != node)
 			continue;
 #endif
 		/* Enough room on this page? */
@@ -358,8 +359,9 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
 			return NULL;
-		sp = virt_to_page(b);
-		__SetPageSlab(sp);
+		folio = virt_to_folio(b);
+		__folio_set_slab(folio);
+		sp = folio_slab(folio);

 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
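Note the ordering in the hunk above: the fresh page is first reached through its folio, flagged as slab memory with __folio_set_slab(), and only then reinterpreted as a struct slab, which is why slob_alloc() now carries both a folio and an sp variable. Condensed into one place (hypothetical helper name; the body is just the sequence from the hunk, minus locking):

/* Hypothetical condensation of the new-page path in slob_alloc() above. */
static struct slab *example_new_slob_slab(gfp_t gfp, int node)
{
	struct folio *folio;
	struct slab *sp;
	void *b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);

	if (!b)
		return NULL;

	folio = virt_to_folio(b);	/* folio view of the page just allocated */
	__folio_set_slab(folio);	/* mark it as slab memory first ...      */
	sp = folio_slab(folio);		/* ... then it may be treated as a slab  */

	sp->units = SLOB_UNITS(PAGE_SIZE);
	return sp;
}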
@@ -381,7 +383,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
  */
 static void slob_free(void *block, int size)
 {
-	struct page *sp;
+	struct slab *sp;
 	slob_t *prev, *next, *b = (slob_t *)block;
 	slobidx_t units;
 	unsigned long flags;
@@ -391,7 +393,7 @@ static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);

-	sp = virt_to_page(block);
+	sp = virt_to_slab(block);
 	units = SLOB_UNITS(size);

 	spin_lock_irqsave(&slob_lock, flags);
@@ -401,8 +403,8 @@ static void slob_free(void *block, int size)
 		if (slob_page_free(sp))
 			clear_slob_page_free(sp);
 		spin_unlock_irqrestore(&slob_lock, flags);
-		__ClearPageSlab(sp);
-		page_mapcount_reset(sp);
+		__folio_clear_slab(slab_folio(sp));
+		page_mapcount_reset(slab_page(sp));
 		slob_free_pages(b, 0);
 		return;
 	}
@@ -544,7 +546,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
 void kfree(const void *block)
 {
-	struct page *sp;
+	struct folio *sp;

 	trace_kfree(_RET_IP_, block);
@@ -552,16 +554,17 @@ void kfree(const void *block)
 		return;
 	kmemleak_free(block);

-	sp = virt_to_page(block);
-	if (PageSlab(sp)) {
+	sp = virt_to_folio(block);
+	if (folio_test_slab(sp)) {
 		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
 	} else {
-		unsigned int order = compound_order(sp);
-		mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
+		unsigned int order = folio_order(sp);
+
+		mod_node_page_state(folio_pgdat(sp), NR_SLAB_UNRECLAIMABLE_B,
 				    -(PAGE_SIZE << order));
-		__free_pages(sp, order);
+		__free_pages(folio_page(sp, 0), order);
 	}
 }
...