Commit 4138fdfc authored by Roman Gushchin, committed by Linus Torvalds

mm: slub: implement SLUB version of obj_to_index()

This commit implements a SLUB version of the obj_to_index() function, which
will be required to calculate the offset of an obj_cgroup in the obj_cgroups
vector in order to store/obtain the objcg ownership data.

To make it fast, let's repeat SLAB's trick introduced by commit
6a2d7a95 ("SLAB: use a multiply instead of a divide in
obj_to_index()") and avoid an expensive division.

Vlastimil Babka noticed that SLUB already has a similar function called
slab_index(), which is defined only if SLUB_DEBUG is enabled.  It does
similar math, but with a division, and it takes a page address instead
of a page pointer.

Let's remove slab_index() and replace it with the new helper
__obj_to_index(), which takes a page address.  obj_to_index() will be a
simple wrapper taking a page pointer and passing page_address(page) into
__obj_to_index().
Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20200623174037.3951353-5-guro@fb.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d42f3245
include/linux/slub_def.h
@@ -8,6 +8,7 @@
  * (C) 2007 SGI, Christoph Lameter
  */
 #include <linux/kobject.h>
+#include <linux/reciprocal_div.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -86,6 +87,7 @@ struct kmem_cache {
 	unsigned long min_partial;
 	unsigned int size;	/* The size of an object including metadata */
 	unsigned int object_size;/* The size of an object without metadata */
+	struct reciprocal_value reciprocal_size;
 	unsigned int offset;	/* Free pointer offset */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
 	/* Number of per cpu partial objects to keep around */
@@ -182,4 +184,18 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
 	return result;
 }
 
+/* Determine object index from a given position */
+static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
+					  void *addr, void *obj)
+{
+	return reciprocal_divide(kasan_reset_tag(obj) - addr,
+				 cache->reciprocal_size);
+}
+
+static inline unsigned int obj_to_index(const struct kmem_cache *cache,
+					const struct page *page, void *obj)
+{
+	return __obj_to_index(cache, page_address(page), obj);
+}
+
 #endif /* _LINUX_SLUB_DEF_H */
...@@ -317,12 +317,6 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) ...@@ -317,12 +317,6 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
__p < (__addr) + (__objects) * (__s)->size; \ __p < (__addr) + (__objects) * (__s)->size; \
__p += (__s)->size) __p += (__s)->size)
/* Determine object index from a given position */
static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
{
return (kasan_reset_tag(p) - addr) / s->size;
}
static inline unsigned int order_objects(unsigned int order, unsigned int size) static inline unsigned int order_objects(unsigned int order, unsigned int size)
{ {
return ((unsigned int)PAGE_SIZE << order) / size; return ((unsigned int)PAGE_SIZE << order) / size;
...@@ -465,7 +459,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page) ...@@ -465,7 +459,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
bitmap_zero(object_map, page->objects); bitmap_zero(object_map, page->objects);
for (p = page->freelist; p; p = get_freepointer(s, p)) for (p = page->freelist; p; p = get_freepointer(s, p))
set_bit(slab_index(p, s, addr), object_map); set_bit(__obj_to_index(s, addr, p), object_map);
return object_map; return object_map;
} }
...@@ -3754,6 +3748,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order) ...@@ -3754,6 +3748,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
*/ */
size = ALIGN(size, s->align); size = ALIGN(size, s->align);
s->size = size; s->size = size;
s->reciprocal_size = reciprocal_value(size);
if (forced_order >= 0) if (forced_order >= 0)
order = forced_order; order = forced_order;
else else
...@@ -3858,7 +3853,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, ...@@ -3858,7 +3853,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
map = get_map(s, page); map = get_map(s, page);
for_each_object(p, s, addr, page->objects) { for_each_object(p, s, addr, page->objects) {
if (!test_bit(slab_index(p, s, addr), map)) { if (!test_bit(__obj_to_index(s, addr, p), map)) {
pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr); pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
print_tracking(s, p); print_tracking(s, p);
} }
...@@ -4574,7 +4569,7 @@ static void validate_slab(struct kmem_cache *s, struct page *page) ...@@ -4574,7 +4569,7 @@ static void validate_slab(struct kmem_cache *s, struct page *page)
/* Now we know that a valid freelist exists */ /* Now we know that a valid freelist exists */
map = get_map(s, page); map = get_map(s, page);
for_each_object(p, s, addr, page->objects) { for_each_object(p, s, addr, page->objects) {
u8 val = test_bit(slab_index(p, s, addr), map) ? u8 val = test_bit(__obj_to_index(s, addr, p), map) ?
SLUB_RED_INACTIVE : SLUB_RED_ACTIVE; SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
if (!check_object(s, page, p, val)) if (!check_object(s, page, p, val))
...@@ -4765,7 +4760,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s, ...@@ -4765,7 +4760,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
map = get_map(s, page); map = get_map(s, page);
for_each_object(p, s, addr, page->objects) for_each_object(p, s, addr, page->objects)
if (!test_bit(slab_index(p, s, addr), map)) if (!test_bit(__obj_to_index(s, addr, p), map))
add_location(t, s, get_track(s, p, alloc)); add_location(t, s, get_track(s, p, alloc));
put_map(map); put_map(map);
} }