Commit fd37617e authored by Pekka Enberg

Merge branches 'topic/fixes', 'topic/cleanups' and 'topic/documentation' into for-linus

@@ -253,9 +253,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
-extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
-	__kmalloc_track_caller(size, flags, __builtin_return_address(0))
+	__kmalloc_track_caller(size, flags, _RET_IP_)
 #else
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc(size, flags)
@@ -271,10 +271,10 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
  * allocation request comes from.
  */
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
-			__builtin_return_address(0))
+			_RET_IP_)
 #else
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node(size, flags, node)
...
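The switch from `__builtin_return_address(0)` (a `void *`) to `_RET_IP_` (an `unsigned long`, defined in `<linux/kernel.h>` as `(unsigned long)__builtin_return_address(0)`) gives both slab allocators one integer type for caller addresses. Below is a minimal userspace sketch of the pattern; `track_malloc()` and `tracked_malloc()` are hypothetical stand-ins for the `__kmalloc_track_caller()` machinery, not kernel functions:

```c
/*
 * Minimal userspace sketch of caller tracking. _RET_IP_ mirrors the
 * kernel's definition in <linux/kernel.h>; track_malloc() and
 * tracked_malloc() are made-up stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

#define _RET_IP_ ((unsigned long)__builtin_return_address(0))

static void *track_malloc(size_t size, unsigned long caller)
{
        /* a real allocator would stash 'caller' in per-object metadata */
        printf("alloc %zu bytes for caller %#lx\n", size, caller);
        return malloc(size);
}

/* expands at the call site, so the recorded address is the user's */
#define tracked_malloc(size) track_malloc((size), _RET_IP_)

int main(void)
{
        void *p = tracked_malloc(64);

        free(p);
        return 0;
}
```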
@@ -2123,6 +2123,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
+ * Note that kmem_cache_name() is not guaranteed to return the same pointer,
+ * therefore applications must manage it themselves.
  *
  * The flags are
  *
@@ -2997,7 +2999,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 		 * there must be at least one object available for
 		 * allocation.
 		 */
-		BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
+		BUG_ON(slabp->inuse >= cachep->num);

 		while (slabp->inuse < cachep->num && batchcount--) {
 			STATS_INC_ALLOCED(cachep);
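The dropped half of the check was dead code: in this era's `struct slab`, `inuse` is an `unsigned int`, so `inuse < 0` can never be true (compilers flag the comparison as always false). A standalone sketch of the pitfall:

```c
/*
 * Demonstrates why the dropped check was dead code: an unsigned value
 * wraps instead of going negative, so 'x < 0' is always false.
 */
#include <stdio.h>

int main(void)
{
        unsigned int inuse = 0;

        inuse--;                /* wraps to UINT_MAX, never becomes -1 */
        if (inuse < 0)          /* always false for an unsigned type */
                puts("never reached");
        else
                printf("inuse wrapped to %u\n", inuse);
        return 0;
}
```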
@@ -3686,9 +3688,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);

 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
-		int node, void *caller)
+		int node, unsigned long caller)
 {
-	return __do_kmalloc_node(size, flags, node, caller);
+	return __do_kmalloc_node(size, flags, node, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
@@ -3730,9 +3732,9 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);

-void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-	return __do_kmalloc(size, flags, caller);
+	return __do_kmalloc(size, flags, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
...
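On the SLAB side the internal `__do_kmalloc*()` helpers still take a `void *`, so the new `unsigned long` caller value is cast back at the API boundary. A sketch of why the round trip is safe; `record_caller()` is a hypothetical stand-in for the internal helper:

```c
/*
 * Sketch of the cast at the boundary: on Linux targets unsigned long
 * is at least pointer sized, so integer <-> pointer round trips of a
 * code address lose nothing.
 */
#include <assert.h>
#include <stdio.h>

static int some_object;

static void record_caller(void *caller)
{
        printf("recorded caller %p\n", caller);
}

int main(void)
{
        unsigned long addr = (unsigned long)&some_object;

        assert(sizeof(unsigned long) >= sizeof(void *));
        record_caller((void *)addr);    /* same shape as (void *)caller above */
        return 0;
}
```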
@@ -153,6 +153,10 @@
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif

+#define OO_SHIFT	16
+#define OO_MASK		((1 << OO_SHIFT) - 1)
+#define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
+
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
@@ -178,7 +182,7 @@ static LIST_HEAD(slab_caches);
  * Tracking user of a slab.
  */
 struct track {
-	void *addr;		/* Called from address */
+	unsigned long addr;	/* Called from address */
 	int cpu;		/* Was running on cpu */
 	int pid;		/* Pid context */
 	unsigned long when;	/* When did the operation occur */
@@ -290,7 +294,7 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 						unsigned long size)
 {
 	struct kmem_cache_order_objects x = {
-		(order << 16) + (PAGE_SIZE << order) / size
+		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
 	};

 	return x;
@@ -298,12 +302,12 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 static inline int oo_order(struct kmem_cache_order_objects x)
 {
-	return x.x >> 16;
+	return x.x >> OO_SHIFT;
 }

 static inline int oo_objects(struct kmem_cache_order_objects x)
 {
-	return x.x & ((1 << 16) - 1);
+	return x.x & OO_MASK;
 }

 #ifdef CONFIG_SLUB_DEBUG
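The named constants make the packing in `kmem_cache_order_objects` explicit: page order in the bits above `OO_SHIFT`, object count in the low 16 bits of a single word. A runnable userspace sketch of the same helpers, with `PAGE_SIZE` hardcoded to 4 KiB for the demo:

```c
/*
 * Userspace sketch of SLUB's order/objects packing: order in the high
 * bits, object count in the low OO_SHIFT bits.
 */
#include <stdio.h>

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define PAGE_SIZE	4096UL		/* demo assumption */

struct kmem_cache_order_objects {
        unsigned long x;
};

static struct kmem_cache_order_objects oo_make(int order, unsigned long size)
{
        struct kmem_cache_order_objects x = {
                (order << OO_SHIFT) + (PAGE_SIZE << order) / size
        };
        return x;
}

static int oo_order(struct kmem_cache_order_objects x)
{
        return x.x >> OO_SHIFT;
}

static int oo_objects(struct kmem_cache_order_objects x)
{
        return x.x & OO_MASK;
}

int main(void)
{
        /* a 2-page (order-1) slab of 128-byte objects holds 64 objects */
        struct kmem_cache_order_objects oo = oo_make(1, 128);

        printf("order=%d objects=%d\n", oo_order(oo), oo_objects(oo));
        return 0;
}
```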
@@ -367,7 +371,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }

 static void set_track(struct kmem_cache *s, void *object,
-			enum track_item alloc, void *addr)
+			enum track_item alloc, unsigned long addr)
 {
 	struct track *p;
@@ -391,8 +395,8 @@ static void init_tracking(struct kmem_cache *s, void *object)
 	if (!(s->flags & SLAB_STORE_USER))
 		return;

-	set_track(s, object, TRACK_FREE, NULL);
-	set_track(s, object, TRACK_ALLOC, NULL);
+	set_track(s, object, TRACK_FREE, 0UL);
+	set_track(s, object, TRACK_ALLOC, 0UL);
 }

 static void print_track(const char *s, struct track *t)
@@ -401,7 +405,7 @@ static void print_track(const char *s, struct track *t)
 		return;

 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
-		s, t->addr, jiffies - t->when, t->cpu, t->pid);
+		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
 }

 static void print_tracking(struct kmem_cache *s, void *object)
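With `track.addr` now an integer, `0UL` replaces `NULL` as the "no caller recorded" sentinel, and the value is cast back to a pointer only for printing. A userspace sketch of the pattern, where plain `%p` stands in for the kernel's symbol-resolving `%pS` and `struct track` is trimmed to the fields used:

```c
/*
 * Sketch of the tracking record: store the caller as an unsigned long,
 * treat 0 as "nothing recorded", cast back to a pointer at print time.
 */
#include <stdio.h>

struct track {
        unsigned long addr;     /* called-from address */
        int cpu;
        int pid;
};

static void print_track(const char *s, const struct track *t)
{
        if (!t->addr)           /* sentinel: nothing was recorded */
                return;
        printf("INFO: %s in %p cpu=%d pid=%d\n",
               s, (void *)t->addr, t->cpu, t->pid);
}

int main(void)
{
        struct track t = {
                .addr = (unsigned long)__builtin_return_address(0),
                .cpu = 0,
                .pid = 1,
        };

        print_track("Allocated", &t);
        return 0;
}
```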
@@ -692,7 +696,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
 		object_err(s, page, p, "Freepointer corrupt");
 		/*
-		 * No choice but to zap it and thus loose the remainder
+		 * No choice but to zap it and thus lose the remainder
 		 * of the free objects in this slab. May cause
 		 * another error because the object count is now wrong.
 		 */
@@ -764,8 +768,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	}

 	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
-	if (max_objects > 65535)
-		max_objects = 65535;
+	if (max_objects > MAX_OBJS_PER_PAGE)
+		max_objects = MAX_OBJS_PER_PAGE;

 	if (page->objects != max_objects) {
 		slab_err(s, page, "Wrong number of objects. Found %d but "
@@ -866,7 +870,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 }

 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto bad;
@@ -906,7 +910,7 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
 }

 static int free_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto fail;
@@ -1029,10 +1033,10 @@ static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}

 static inline int alloc_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }

 static inline int free_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }

 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
@@ -1499,8 +1503,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
  */
-static void *__slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void **object;
 	struct page *new;
@@ -1584,7 +1588,7 @@ static void *__slab_alloc(struct kmem_cache *s,
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
+		gfp_t gfpflags, int node, unsigned long addr)
 {
 	void **object;
 	struct kmem_cache_cpu *c;
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{ {
return slab_alloc(s, gfpflags, -1, __builtin_return_address(0)); return slab_alloc(s, gfpflags, -1, _RET_IP_);
} }
EXPORT_SYMBOL(kmem_cache_alloc); EXPORT_SYMBOL(kmem_cache_alloc);
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{ {
return slab_alloc(s, gfpflags, node, __builtin_return_address(0)); return slab_alloc(s, gfpflags, node, _RET_IP_);
} }
EXPORT_SYMBOL(kmem_cache_alloc_node); EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif #endif
@@ -1635,7 +1639,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-				void *x, void *addr, unsigned int offset)
+			void *x, unsigned long addr, unsigned int offset)
 {
 	void *prior;
 	void **object = (void *)x;
* with all sorts of special processing. * with all sorts of special processing.
*/ */
static __always_inline void slab_free(struct kmem_cache *s, static __always_inline void slab_free(struct kmem_cache *s,
struct page *page, void *x, void *addr) struct page *page, void *x, unsigned long addr)
{ {
void **object = (void *)x; void **object = (void *)x;
struct kmem_cache_cpu *c; struct kmem_cache_cpu *c;
@@ -1732,11 +1736,11 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	page = virt_to_head_page(x);

-	slab_free(s, page, x, __builtin_return_address(0));
+	slab_free(s, page, x, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);

-/* Figure out on which slab object the object resides */
+/* Figure out on which slab page the object resides */
 static struct page *get_object_page(const void *x)
 {
 	struct page *page = virt_to_head_page(x);
@@ -1808,8 +1812,8 @@ static inline int slab_order(int size, int min_objects,
 	int rem;
 	int min_order = slub_min_order;

-	if ((PAGE_SIZE << min_order) / size > 65535)
-		return get_order(size * 65535) - 1;
+	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
+		return get_order(size * MAX_OBJS_PER_PAGE) - 1;

 	for (order = max(min_order,
 				fls(min_objects * size - 1) - PAGE_SHIFT);
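The cap exists because `page->objects` is a 16-bit field: if the minimum order would hold more than `MAX_OBJS_PER_PAGE` objects, `slab_order()` returns the largest order that still fits. A userspace sketch of the arithmetic, with `get_order()` reimplemented locally and hypothetical numbers (4 KiB pages, `slub_min_order = 8`):

```c
/*
 * Sketch of the MAX_OBJS_PER_PAGE cap in slab_order(). get_order() is
 * reimplemented here; PAGE_SHIFT assumes 4 KiB pages.
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define MAX_OBJS_PER_PAGE	65535	/* since page.objects is u16 */

/* smallest order whose allocation covers 'size' bytes */
static int get_order(unsigned long size)
{
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
                order++;
                size >>= 1;
        }
        return order;
}

int main(void)
{
        unsigned long size = 8;		/* tiny objects overflow u16 fastest */
        int min_order = 8;		/* hypothetical slub_min_order=8 boot setting */

        /* 256 pages of 8-byte objects would be 131072 objects: too many */
        if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
                printf("capped order: %d\n",
                       get_order(size * MAX_OBJS_PER_PAGE) - 1);
        return 0;
}
```

For size 8 this prints `capped order: 6`, and an order-6 slab holds 32768 objects, safely within the u16 counter.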
@@ -2074,8 +2078,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
  * when allocating for the kmalloc_node_cache. This is used for bootstrapping
  * memory on a fresh node that has no slab structures yet.
  */
-static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
-							int node)
+static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
 {
 	struct page *page;
 	struct kmem_cache_node *n;
@@ -2113,7 +2116,6 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	local_irq_save(flags);
 	add_partial(n, page, 0);
 	local_irq_restore(flags);
-	return n;
 }

 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2145,8 +2147,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 			n = &s->local_node;
 		else {
 			if (slab_state == DOWN) {
-				n = early_kmem_cache_node_alloc(gfpflags,
-								node);
+				early_kmem_cache_node_alloc(gfpflags, node);
 				continue;
 			}
 			n = kmem_cache_alloc_node(kmalloc_caches,
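Since `early_kmem_cache_node_alloc()` now installs the node itself (via `add_partial()`), its return value was unused and the call site collapses to call-and-continue. A schematic sketch of that refactor's shape, with illustrative names only:

```c
/*
 * Schematic sketch: the bootstrap allocator publishes the node itself
 * (the kernel does this via add_partial()), so the caller stores no
 * return value and simply continues the loop.
 */
#include <stdio.h>

#define MAX_NODES 4

static int node_state[MAX_NODES];       /* stand-in for per-node slab data */

static void early_node_alloc(int node)  /* returns void, like the patch */
{
        node_state[node] = 1;           /* register directly, return nothing */
}

int main(void)
{
        for (int node = 0; node < MAX_NODES; node++) {
                if (!node_state[node]) {
                        early_node_alloc(node);
                        continue;       /* no 'n = ...' any more */
                }
        }
        printf("node 0 initialized: %d\n", node_state[0]);
        return 0;
}
```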
@@ -2660,7 +2661,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;

-	return slab_alloc(s, flags, -1, __builtin_return_address(0));
+	return slab_alloc(s, flags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
@@ -2688,7 +2689,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;

-	return slab_alloc(s, flags, node, __builtin_return_address(0));
+	return slab_alloc(s, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2745,7 +2746,7 @@ void kfree(const void *x)
 		put_page(page);
 		return;
 	}
-	slab_free(page->slab, page, object, __builtin_return_address(0));
+	slab_free(page->slab, page, object, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
@@ -3212,7 +3213,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 #endif

-void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
@@ -3228,7 +3229,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 }
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-					int node, void *caller)
+					int node, unsigned long caller)
 {
 	struct kmem_cache *s;
@@ -3439,7 +3440,7 @@ static void resiliency_test(void) {};
 struct location {
 	unsigned long count;
-	void *addr;
+	unsigned long addr;
 	long long sum_time;
 	long min_time;
 	long max_time;
@@ -3487,7 +3488,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 {
 	long start, end, pos;
 	struct location *l;
-	void *caddr;
+	unsigned long caddr;
 	unsigned long age = jiffies - track->when;

 	start = -1;
@@ -4355,7 +4356,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 /*
  * Need to buffer aliases during bootup until sysfs becomes
- * available lest we loose that information.
+ * available lest we lose that information.
  */
 struct saved_alias {
 	struct kmem_cache *s;
...