Commit 6e9ed0cc authored by Américo Wang, committed by Pekka Enberg

slob: clean up the code

- Use NULL instead of plain 0;
- Rename slob_page() to is_slob_page();
- Define slob_page() to convert void* to struct slob_page*;
- Rename slob_new_page() to slob_new_pages();
- Define slob_free_pages() accordingly.

Compile tests only.
Signed-off-by: WANG Cong <wangcong@zeuux.org>
Signed-off-by: Matt Mackall <mpm@selenic.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
parent a6525042
...@@ -126,9 +126,9 @@ static LIST_HEAD(free_slob_medium); ...@@ -126,9 +126,9 @@ static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large); static LIST_HEAD(free_slob_large);
/*
 * is_slob_page: True for all slob pages (false for bigblock pages).
 *
 * Renamed from slob_page() so that name can be reused as an
 * address-to-slob_page conversion helper.  The cast is safe because a
 * struct slob_page overlays struct page (checked elsewhere in this file).
 */
static inline int is_slob_page(struct slob_page *sp)
{
	return PageSlobPage((struct page *)sp);
}
...@@ -143,6 +143,11 @@ static inline void clear_slob_page(struct slob_page *sp) ...@@ -143,6 +143,11 @@ static inline void clear_slob_page(struct slob_page *sp)
__ClearPageSlobPage((struct page *)sp); __ClearPageSlobPage((struct page *)sp);
} }
/*
 * slob_page: convert a virtual address to the slob_page covering it
 * (thin wrapper around virt_to_page(); struct slob_page overlays
 * struct page).
 */
static inline struct slob_page *slob_page(const void *addr)
{
	return (struct slob_page *)virt_to_page(addr);
}
/* /*
* slob_page_free: true for pages on free_slob_pages list. * slob_page_free: true for pages on free_slob_pages list.
*/ */
...@@ -230,7 +235,7 @@ static int slob_last(slob_t *s) ...@@ -230,7 +235,7 @@ static int slob_last(slob_t *s)
return !((unsigned long)slob_next(s) & ~PAGE_MASK); return !((unsigned long)slob_next(s) & ~PAGE_MASK);
} }
static void *slob_new_page(gfp_t gfp, int order, int node) static void *slob_new_pages(gfp_t gfp, int order, int node)
{ {
void *page; void *page;
...@@ -247,12 +252,17 @@ static void *slob_new_page(gfp_t gfp, int order, int node) ...@@ -247,12 +252,17 @@ static void *slob_new_page(gfp_t gfp, int order, int node)
return page_address(page); return page_address(page);
} }
/*
 * slob_free_pages: return 2^order pages starting at b to the page
 * allocator; counterpart of slob_new_pages().
 */
static void slob_free_pages(void *b, int order)
{
	free_pages((unsigned long)b, order);
}
/* /*
* Allocate a slob block within a given slob_page sp. * Allocate a slob block within a given slob_page sp.
*/ */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align) static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{ {
slob_t *prev, *cur, *aligned = 0; slob_t *prev, *cur, *aligned = NULL;
int delta = 0, units = SLOB_UNITS(size); int delta = 0, units = SLOB_UNITS(size);
for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) { for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
...@@ -349,10 +359,10 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node) ...@@ -349,10 +359,10 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
/* Not enough space: must allocate a new page */ /* Not enough space: must allocate a new page */
if (!b) { if (!b) {
b = slob_new_page(gfp & ~__GFP_ZERO, 0, node); b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
if (!b) if (!b)
return 0; return NULL;
sp = (struct slob_page *)virt_to_page(b); sp = slob_page(b);
set_slob_page(sp); set_slob_page(sp);
spin_lock_irqsave(&slob_lock, flags); spin_lock_irqsave(&slob_lock, flags);
...@@ -384,7 +394,7 @@ static void slob_free(void *block, int size) ...@@ -384,7 +394,7 @@ static void slob_free(void *block, int size)
return; return;
BUG_ON(!size); BUG_ON(!size);
sp = (struct slob_page *)virt_to_page(block); sp = slob_page(block);
units = SLOB_UNITS(size); units = SLOB_UNITS(size);
spin_lock_irqsave(&slob_lock, flags); spin_lock_irqsave(&slob_lock, flags);
...@@ -476,7 +486,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) ...@@ -476,7 +486,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
} else { } else {
void *ret; void *ret;
ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node); ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
if (ret) { if (ret) {
struct page *page; struct page *page;
page = virt_to_page(ret); page = virt_to_page(ret);
...@@ -494,8 +504,8 @@ void kfree(const void *block) ...@@ -494,8 +504,8 @@ void kfree(const void *block)
if (unlikely(ZERO_OR_NULL_PTR(block))) if (unlikely(ZERO_OR_NULL_PTR(block)))
return; return;
sp = (struct slob_page *)virt_to_page(block); sp = slob_page(block);
if (slob_page(sp)) { if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align); unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align); slob_free(m, *m + align);
...@@ -513,8 +523,8 @@ size_t ksize(const void *block) ...@@ -513,8 +523,8 @@ size_t ksize(const void *block)
if (unlikely(block == ZERO_SIZE_PTR)) if (unlikely(block == ZERO_SIZE_PTR))
return 0; return 0;
sp = (struct slob_page *)virt_to_page(block); sp = slob_page(block);
if (slob_page(sp)) { if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align); unsigned int *m = (unsigned int *)(block - align);
return SLOB_UNITS(*m) * SLOB_UNIT; return SLOB_UNITS(*m) * SLOB_UNIT;
...@@ -572,7 +582,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) ...@@ -572,7 +582,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
if (c->size < PAGE_SIZE) if (c->size < PAGE_SIZE)
b = slob_alloc(c->size, flags, c->align, node); b = slob_alloc(c->size, flags, c->align, node);
else else
b = slob_new_page(flags, get_order(c->size), node); b = slob_new_pages(flags, get_order(c->size), node);
if (c->ctor) if (c->ctor)
c->ctor(b); c->ctor(b);
...@@ -586,7 +596,7 @@ static void __kmem_cache_free(void *b, int size) ...@@ -586,7 +596,7 @@ static void __kmem_cache_free(void *b, int size)
if (size < PAGE_SIZE) if (size < PAGE_SIZE)
slob_free(b, size); slob_free(b, size);
else else
free_pages((unsigned long)b, get_order(size)); slob_free_pages(b, get_order(size));
} }
static void kmem_rcu_free(struct rcu_head *head) static void kmem_rcu_free(struct rcu_head *head)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment