Kirill Smelkov / linux / Commits

Commit 023dc704 authored Oct 03, 2012 by Pekka Enberg
Merge branch 'slab/next' into slab/for-linus
parents a0d271cb 608da7e3
Showing 8 changed files with 186 additions and 162 deletions
include/linux/slab.h       +4   -2
include/linux/slab_def.h   +3   -10
include/linux/slob_def.h   +4   -2
mm/slab.c                  +40  -55
mm/slab_common.c           +48  -49
mm/slob.c                  +27  -6
mm/slub.c                  +39  -24
mm/util.c                  +21  -14
include/linux/slab.h
...
@@ -321,7 +321,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-    (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+    (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+    (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
    __kmalloc_track_caller(size, flags, _RET_IP_)
...
@@ -340,7 +341,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
-    (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
+    (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
+    (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
    __kmalloc_node_track_caller(size, flags, node, \
...
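The widened preprocessor condition matters because SLOB gains kmalloc tracing in this merge (see mm/slob.c below): with CONFIG_TRACING enabled, kmalloc_track_caller() has to resolve to the out-of-line __kmalloc_track_caller() so that the recorded call site is the original caller rather than an inline wrapper. A minimal sketch of how a helper would rely on that; the helper name is made up, only kmalloc_track_caller() and _RET_IP_ come from the header above:

    #include <linux/slab.h>

    /* Hypothetical helper: allocations made here should be charged to
     * *our* caller in kmalloc tracepoints and leak reports, so use the
     * _track_caller variant instead of plain kmalloc(). */
    static void *my_alloc_buffer(size_t len, gfp_t gfp)
    {
        /* expands to __kmalloc_track_caller(len, gfp, _RET_IP_) */
        return kmalloc_track_caller(len, gfp);
    }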
include/linux/slab_def.h
...
@@ -45,7 +45,6 @@ struct kmem_cache {
    unsigned int colour_off;    /* colour offset */
    struct kmem_cache *slabp_cache;
    unsigned int slab_size;
    unsigned int dflags;        /* dynamic flags */

    /* constructor func */
    void (*ctor)(void *obj);
...
@@ -112,19 +111,13 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags);
-extern size_t slab_buffer_size(struct kmem_cache *cachep);
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
-kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
    return kmem_cache_alloc(cachep, flags);
}
-
-static inline size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-    return 0;
-}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
...
@@ -154,7 +147,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
#endif
        cachep = malloc_sizes[i].cs_cachep;

-       ret = kmem_cache_alloc_trace(size, cachep, flags);
+       ret = kmem_cache_alloc_trace(cachep, flags, size);

        return ret;
    }
...
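The reordered prototype brings the SLAB declaration in line with what SLUB already uses: cache and flags first, requested size last, so the kmalloc() fast path reads identically on both allocators. A reduced sketch of the tracing fast path under the new ordering; the wrapper name is illustrative, the malloc_sizes[] lookup is omitted and the cache is passed in directly to keep the example self-contained:

    #include <linux/slab.h>

    /* Sketch: fixed-size allocation through the tracing hook, assuming
     * CONFIG_SLAB together with CONFIG_TRACING. */
    static __always_inline void *kmalloc_traced(struct kmem_cache *cachep,
                                                size_t size, gfp_t flags)
    {
        /* new order: cache, flags, requested size (was: size, cache, flags) */
        return kmem_cache_alloc_trace(cachep, flags, size);
    }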
include/linux/slob_def.h
#ifndef __LINUX_SLOB_DEF_H
#define __LINUX_SLOB_DEF_H

+#include <linux/numa.h>
+
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
-   return kmem_cache_alloc_node(cachep, flags, -1);
+   return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
}

void *__kmalloc_node(size_t size, gfp_t flags, int node);
...
@@ -26,7 +28,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
-   return __kmalloc_node(size, flags, -1);
+   return __kmalloc_node(size, flags, NUMA_NO_NODE);
}

static __always_inline void *__kmalloc(size_t size, gfp_t flags)
...
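NUMA_NO_NODE comes from <linux/numa.h>, where it is defined as (-1); spelling it out replaces the bare -1 "no node preference" magic value SLOB used before. A small sketch of the idiom; the wrapper below is illustrative, not part of the header:

    #include <linux/numa.h>   /* NUMA_NO_NODE == -1 */
    #include <linux/slab.h>

    /* Illustrative wrapper: no node preference, let the allocator pick. */
    static void *alloc_any_node(struct kmem_cache *cachep, gfp_t flags)
    {
        return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
    }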
mm/slab.c
...
@@ -498,14 +498,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#endif

-#ifdef CONFIG_TRACING
-size_t slab_buffer_size(struct kmem_cache *cachep)
-{
-   return cachep->size;
-}
-EXPORT_SYMBOL(slab_buffer_size);
-#endif
-
/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
...
@@ -515,13 +507,6 @@ EXPORT_SYMBOL(slab_buffer_size);
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

-static inline struct kmem_cache *page_get_cache(struct page *page)
-{
-   page = compound_head(page);
-   BUG_ON(!PageSlab(page));
-   return page->slab_cache;
-}
-
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
    struct page *page = virt_to_head_page(obj);
...
@@ -818,6 +803,7 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
    printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
           function, cachep->name, msg);
    dump_stack();
+   add_taint(TAINT_BAD_PAGE);
}

/*
...
@@ -1781,9 +1767,6 @@ void __init kmem_cache_init_late(void)
    slab_state = UP;

-   /* Annotate slab for lockdep -- annotate the malloc caches */
-   init_lock_keys();
-
    /* 6) resize the head arrays to their final sizes */
    mutex_lock(&slab_mutex);
    list_for_each_entry(cachep, &slab_caches, list)
...
@@ -1791,6 +1774,9 @@ void __init kmem_cache_init_late(void)
            BUG();
    mutex_unlock(&slab_mutex);

+   /* Annotate slab for lockdep -- annotate the malloc caches */
+   init_lock_keys();
+
    /* Done! */
    slab_state = FULL;
...
@@ -2506,8 +2492,9 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
    }
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
    if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-       && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
-       cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
+       && cachep->object_size > cache_line_size()
+       && ALIGN(size, cachep->align) < PAGE_SIZE) {
+       cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
        size = PAGE_SIZE;
    }
#endif
...
@@ -3098,7 +3085,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
}

static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
-                  void *caller)
+                  unsigned long caller)
{
    struct page *page;
    unsigned int objnr;
...
@@ -3118,7 +3105,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
        *dbg_redzone2(cachep, objp) = RED_INACTIVE;
    }
    if (cachep->flags & SLAB_STORE_USER)
-       *dbg_userword(cachep, objp) = caller;
+       *dbg_userword(cachep, objp) = (void *)caller;

    objnr = obj_to_index(cachep, slabp, objp);
...
@@ -3131,7 +3118,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
    if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
        if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
-           store_stackinfo(cachep, objp, (unsigned long)caller);
+           store_stackinfo(cachep, objp, caller);
            kernel_map_pages(virt_to_page(objp),
                     cachep->size / PAGE_SIZE, 0);
        } else {
...
@@ -3285,7 +3272,7 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
#if DEBUG
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
-               gfp_t flags, void *objp, void *caller)
+               gfp_t flags, void *objp, unsigned long caller)
{
    if (!objp)
        return objp;
...
@@ -3302,7 +3289,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
        poison_obj(cachep, objp, POISON_INUSE);
    }
    if (cachep->flags & SLAB_STORE_USER)
-       *dbg_userword(cachep, objp) = caller;
+       *dbg_userword(cachep, objp) = (void *)caller;

    if (cachep->flags & SLAB_RED_ZONE) {
        if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
...
@@ -3576,8 +3563,8 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 * Fallback to other node is possible if __GFP_THISNODE is not set.
 */
static __always_inline void *
-__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
-          void *caller)
+slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+          unsigned long caller)
{
    unsigned long save_flags;
    void *ptr;
...
@@ -3663,7 +3650,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
#endif /* CONFIG_NUMA */

static __always_inline void *
-__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
{
    unsigned long save_flags;
    void *objp;
...
@@ -3799,7 +3786,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 * be in this state _before_ it is released.  Called with disabled ints.
 */
static inline void __cache_free(struct kmem_cache *cachep, void *objp,
-               void *caller)
+               unsigned long caller)
{
    struct array_cache *ac = cpu_cache_get(cachep);
...
@@ -3839,7 +3826,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
-   void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+   void *ret = slab_alloc(cachep, flags, _RET_IP_);

    trace_kmem_cache_alloc(_RET_IP_, ret,
                   cachep->object_size, cachep->size, flags);
...
@@ -3850,14 +3837,14 @@ EXPORT_SYMBOL(kmem_cache_alloc);
#ifdef CONFIG_TRACING
void *
-kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
    void *ret;

-   ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+   ret = slab_alloc(cachep, flags, _RET_IP_);

    trace_kmalloc(_RET_IP_, ret,
-             size, slab_buffer_size(cachep), flags);
+             size, cachep->size, flags);
    return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
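Throughout this file the caller is now carried as an unsigned long and captured with _RET_IP_ instead of the open-coded __builtin_return_address(0). The two are equivalent: _RET_IP_ (from <linux/kernel.h>) is just the cast form, which is also the type the trace_kmalloc()/trace_kmem_cache_alloc() tracepoints take. A tiny sketch of what the macro yields:

    #include <linux/kernel.h>

    /* _RET_IP_ is defined as (unsigned long)__builtin_return_address(0),
     * i.e. the address this function will return to, as an integer. */
    static noinline unsigned long show_call_site(void)
    {
        return _RET_IP_;    /* the caller's instruction pointer */
    }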
...
@@ -3866,8 +3853,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
-   void *ret = __cache_alloc_node(cachep, flags, nodeid,
-                      __builtin_return_address(0));
+   void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);

    trace_kmem_cache_alloc_node(_RET_IP_, ret,
                    cachep->object_size, cachep->size,
...
@@ -3878,17 +3864,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
EXPORT_SYMBOL(kmem_cache_alloc_node);

#ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_trace(size_t size,
-                 struct kmem_cache *cachep,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
                  gfp_t flags,
-                 int nodeid)
+                 int nodeid,
+                 size_t size)
{
    void *ret;

-   ret = __cache_alloc_node(cachep, flags, nodeid,
-                __builtin_return_address(0));
+   ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
+
    trace_kmalloc_node(_RET_IP_, ret,
-              size, slab_buffer_size(cachep),
+              size, cachep->size,
               flags, nodeid);
    return ret;
}
...
@@ -3896,34 +3882,33 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
#endif

static __always_inline void *
-__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
+__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
    struct kmem_cache *cachep;

    cachep = kmem_find_general_cachep(size, flags);
    if (unlikely(ZERO_OR_NULL_PTR(cachep)))
        return cachep;
-   return kmem_cache_alloc_node_trace(size, cachep, flags, node);
+   return kmem_cache_alloc_node_trace(cachep, flags, node, size);
}

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
-   return __do_kmalloc_node(size, flags, node, __builtin_return_address(0));
+   return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
                  int node, unsigned long caller)
{
-   return __do_kmalloc_node(size, flags, node, (void *)caller);
+   return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#else
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
-   return __do_kmalloc_node(size, flags, node, NULL);
+   return __do_kmalloc_node(size, flags, node, 0);
}
EXPORT_SYMBOL(__kmalloc_node);
#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
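The factoring above is the pattern this merge applies across all three allocators: one internal worker that takes the call site explicitly, plus thin exported entry points that either capture _RET_IP_ themselves or forward a caller value handed to them. A generic sketch of that shape, with illustrative names that are not kernel API:

    #include <linux/kernel.h>   /* _RET_IP_ */
    #include <linux/slab.h>

    /* Worker: does the actual allocation and attributes it to 'caller'. */
    static __always_inline void *do_alloc(size_t size, gfp_t flags,
                                          unsigned long caller)
    {
        void *p = kmalloc(size, flags);

        pr_debug("alloc %zu bytes for %pS -> %p\n", size, (void *)caller, p);
        return p;
    }

    /* Normal entry point: the call site is whoever called us. */
    static void *my_alloc(size_t size, gfp_t flags)
    {
        return do_alloc(size, flags, _RET_IP_);
    }

    /* track_caller entry point: the call site is handed in from above. */
    static void *my_alloc_track_caller(size_t size, gfp_t flags,
                                       unsigned long caller)
    {
        return do_alloc(size, flags, caller);
    }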
...
@@ -3936,7 +3921,7 @@ EXPORT_SYMBOL(__kmalloc_node);
 * @caller: function caller for debug tracking of the caller
 */
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
-                     void *caller)
+                     unsigned long caller)
{
    struct kmem_cache *cachep;
    void *ret;
...
@@ -3949,9 +3934,9 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
    cachep = __find_general_cachep(size, flags);
    if (unlikely(ZERO_OR_NULL_PTR(cachep)))
        return cachep;
-   ret = __cache_alloc(cachep, flags, caller);
+   ret = slab_alloc(cachep, flags, caller);

-   trace_kmalloc((unsigned long) caller, ret,
+   trace_kmalloc(caller, ret,
              size, cachep->size, flags);

    return ret;
...
@@ -3961,20 +3946,20 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
void *__kmalloc(size_t size, gfp_t flags)
{
-   return __do_kmalloc(size, flags, __builtin_return_address(0));
+   return __do_kmalloc(size, flags, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
{
-   return __do_kmalloc(size, flags, (void *)caller);
+   return __do_kmalloc(size, flags, caller);
}
EXPORT_SYMBOL(__kmalloc_track_caller);

#else
void *__kmalloc(size_t size, gfp_t flags)
{
-   return __do_kmalloc(size, flags, NULL);
+   return __do_kmalloc(size, flags, 0);
}
EXPORT_SYMBOL(__kmalloc);
#endif
...
@@ -3995,7 +3980,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
    debug_check_no_locks_freed(objp, cachep->object_size);
    if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
        debug_check_no_obj_freed(objp, cachep->object_size);
-   __cache_free(cachep, objp, __builtin_return_address(0));
+   __cache_free(cachep, objp, _RET_IP_);
    local_irq_restore(flags);

    trace_kmem_cache_free(_RET_IP_, objp);
...
@@ -4026,7 +4011,7 @@ void kfree(const void *objp)
    debug_check_no_locks_freed(objp, c->object_size);
    debug_check_no_obj_freed(objp, c->object_size);
-   __cache_free(c, (void *)objp, __builtin_return_address(0));
+   __cache_free(c, (void *)objp, _RET_IP_);
    local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);
...
mm/slab_common.c
...
@@ -23,49 +23,17 @@ enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);

-/*
- * kmem_cache_create - Create a cache.
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
- * @flags: SLAB flags
- * @ctor: A constructor for the objects.
- *
- * Returns a ptr to the cache on success, NULL on failure.
- * Cannot be called within a interrupt, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
- *
- * The flags are
- *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
- *
- * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
- * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline. This can be beneficial if you're counting cycles as closely
- * as davem.
- */
-
-struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
-       unsigned long flags, void (*ctor)(void *))
+#ifdef CONFIG_DEBUG_VM
+static int kmem_cache_sanity_check(const char *name, size_t size)
{
    struct kmem_cache *s = NULL;

-#ifdef CONFIG_DEBUG_VM
    if (!name || in_interrupt() || size < sizeof(void *) ||
        size > KMALLOC_MAX_SIZE) {
-       printk(KERN_ERR "kmem_cache_create(%s) integrity check"
-           " failed\n", name);
-       goto out;
+       pr_err("kmem_cache_create(%s) integrity check failed\n", name);
+       return -EINVAL;
    }
-#endif

-   get_online_cpus();
-   mutex_lock(&slab_mutex);
-
-#ifdef CONFIG_DEBUG_VM
    list_for_each_entry(s, &slab_caches, list) {
        char tmp;
        int res;
...
@@ -77,36 +45,67 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
         */
        res = probe_kernel_address(s->name, tmp);
        if (res) {
-           printk(KERN_ERR
-                  "Slab cache with size %d has lost its name\n",
+           pr_err("Slab cache with size %d has lost its name\n",
               s->object_size);
            continue;
        }

        if (!strcmp(s->name, name)) {
-           printk(KERN_ERR "kmem_cache_create(%s): Cache name"
-               " already exists.\n",
-               name);
+           pr_err("%s (%s): Cache name already exists.\n",
+              __func__, name);
            dump_stack();
            s = NULL;
-           goto oops;
+           return -EINVAL;
        }
    }

    WARN_ON(strchr(name, ' '));  /* It confuses parsers */
+   return 0;
+}
+#else
+static inline int kmem_cache_sanity_check(const char *name, size_t size)
+{
+   return 0;
+}
+#endif

-   s = __kmem_cache_create(name, size, align, flags, ctor);
+/*
+ * kmem_cache_create - Create a cache.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @ctor: A constructor for the objects.
+ *
+ * Returns a ptr to the cache on success, NULL on failure.
+ * Cannot be called within a interrupt, but can be interrupted.
+ * The @ctor is run when new pages are allocated by the cache.
+ *
+ * The flags are
+ *
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
+ * to catch references to uninitialised memory.
+ *
+ * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
+ * for buffer overruns.
+ *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline. This can be beneficial if you're counting cycles as closely
+ * as davem.
+ */
+
+struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
+       unsigned long flags, void (*ctor)(void *))
+{
+   struct kmem_cache *s = NULL;

-#ifdef CONFIG_DEBUG_VM
-oops:
-#endif
+   get_online_cpus();
+   mutex_lock(&slab_mutex);
+
+   if (kmem_cache_sanity_check(name, size) == 0)
+       s = __kmem_cache_create(name, size, align, flags, ctor);
+
    mutex_unlock(&slab_mutex);
    put_online_cpus();

-#ifdef CONFIG_DEBUG_VM
-out:
-#endif
    if (!s && (flags & SLAB_PANIC))
        panic("kmem_cache_create: Failed to create slab '%s'\n", name);
...
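None of the documented behaviour changes with this refactor: callers still get a cache pointer or NULL, must not call from interrupt context, and the constructor still runs as new pages are added to the cache. A minimal usage sketch based on that documentation; struct my_record, the constructor and the cache name are made up for the example:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct my_record {
        int  id;
        char tag[16];
    };

    /* Optional constructor: run for each object when the cache grows. */
    static void my_record_ctor(void *obj)
    {
        memset(obj, 0, sizeof(struct my_record));
    }

    static struct kmem_cache *my_record_cache;

    static int my_cache_init(void)
    {
        my_record_cache = kmem_cache_create("my_record",
                                            sizeof(struct my_record), 0,
                                            SLAB_HWCACHE_ALIGN,
                                            my_record_ctor);
        return my_record_cache ? 0 : -ENOMEM;
    }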
mm/slob.c
...
@@ -194,7 +194,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
    void *page;

#ifdef CONFIG_NUMA
-   if (node != -1)
+   if (node != NUMA_NO_NODE)
        page = alloc_pages_exact_node(node, gfp, order);
    else
#endif
...
@@ -290,7 +290,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
         * If there's a node specification, search for a partial
         * page with a matching node id in the freelist.
         */
-       if (node != -1 && page_to_nid(sp) != node)
+       if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
            continue;
#endif
        /* Enough room on this page? */
...
@@ -425,7 +425,8 @@ static void slob_free(void *block, int size)
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
    unsigned int *m;
    int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
...
@@ -446,7 +447,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
        *m = size;
        ret = (void *)m + align;

-       trace_kmalloc_node(_RET_IP_, ret,
+       trace_kmalloc_node(caller, ret,
                   size, size + align, gfp, node);
    } else {
        unsigned int order = get_order(size);
...
@@ -460,15 +461,35 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
            page->private = size;
        }

-       trace_kmalloc_node(_RET_IP_, ret,
+       trace_kmalloc_node(caller, ret,
                   size, PAGE_SIZE << order, gfp, node);
    }

    kmemleak_alloc(ret, size, 1, gfp);
    return ret;
}
+
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+   return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
EXPORT_SYMBOL(__kmalloc_node);

+#ifdef CONFIG_TRACING
+void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
+{
+   return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
+}
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
+                 int node, unsigned long caller)
+{
+   return __do_kmalloc_node(size, gfp, node, caller);
+}
+#endif
+#endif
+
void kfree(const void *block)
{
    struct page *sp;
...
@@ -514,7 +535,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
    struct kmem_cache *c;

    c = slob_alloc(sizeof(struct kmem_cache),
-       GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
+       GFP_KERNEL, ARCH_KMALLOC_MINALIGN, NUMA_NO_NODE);

    if (c) {
        c->name = name;
...
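The kmalloc path above records the allocation size so kfree()/ksize() can recover it later: small objects get the size written into a word placed just before the returned pointer (*m = size; ret = (void *)m + align), while page-order allocations stash it in page->private. A sketch of the small-object layout and of reading the size back, assuming the same align value that was used at allocation time:

    /* Layout produced by the small-object branch above:
     *
     *   m                          ret = (void *)m + align
     *   |                          |
     *   [ unsigned int header .... ][ object of 'size' bytes ... ]
     *     \_ *m = size               \_ pointer handed to the caller
     */
    static unsigned int slob_stored_size(const void *obj, int align)
    {
        const unsigned int *m = (const void *)((const char *)obj - align);

        return *m;    /* what ksize()/kfree() effectively consult here */
    }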
mm/slub.c
...
@@ -568,6 +568,8 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...)
    printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
    printk(KERN_ERR "----------------------------------------"
            "-------------------------------------\n\n");
+
+   add_taint(TAINT_BAD_PAGE);
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
...
@@ -1069,13 +1071,13 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *pa
    return 0;
}

-static noinline int free_debug_processing(struct kmem_cache *s,
-        struct page *page, void *object, unsigned long addr)
+static noinline struct kmem_cache_node *free_debug_processing(
+   struct kmem_cache *s, struct page *page, void *object,
+   unsigned long addr, unsigned long *flags)
{
-   unsigned long flags;
-   int rc = 0;
+   struct kmem_cache_node *n = get_node(s, page_to_nid(page));

-   local_irq_save(flags);
+   spin_lock_irqsave(&n->list_lock, *flags);
    slab_lock(page);

    if (!check_slab(s, page))
...
@@ -1113,15 +1115,19 @@ static noinline int free_debug_processing(struct kmem_cache *s,
    set_track(s, object, TRACK_FREE, addr);
    trace(s, page, object, 0);
    init_object(s, object, SLUB_RED_INACTIVE);
-   rc = 1;
out:
    slab_unlock(page);
-   local_irq_restore(flags);
-   return rc;
+   /*
+    * Keep node_lock to preserve integrity
+    * until the object is actually freed
+    */
+   return n;

fail:
+   slab_unlock(page);
+   spin_unlock_irqrestore(&n->list_lock, *flags);
    slab_fix(s, "Object at 0x%p not freed", object);
-   goto out;
+   return NULL;
}

static int __init setup_slub_debug(char *str)
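The reworked free_debug_processing() now returns with the node's list_lock held on success (and NULL, with the lock already dropped, on failure), so __slab_free() can finish the free under that same lock instead of re-acquiring it. The general shape of this lock hand-off, reduced to a standalone sketch with illustrative types and names:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct node_state {
        spinlock_t list_lock;
        int        nr_objects;
    };

    /* Success: return the node with list_lock held; the caller unlocks.
     * Failure: drop the lock here and return NULL. */
    static struct node_state *validate_and_lock(struct node_state *n,
                                                unsigned long *flags, bool ok)
    {
        spin_lock_irqsave(&n->list_lock, *flags);
        if (!ok) {
            spin_unlock_irqrestore(&n->list_lock, *flags);
            return NULL;
        }
        return n;    /* lock intentionally still held */
    }

    static void free_one(struct node_state *n, bool ok)
    {
        unsigned long flags;

        n = validate_and_lock(n, &flags, ok);
        if (!n)
            return;
        n->nr_objects--;                  /* still under list_lock */
        spin_unlock_irqrestore(&n->list_lock, flags);
    }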
...
@@ -1214,8 +1220,9 @@ static inline void setup_object_debug(struct kmem_cache *s,
static inline int alloc_debug_processing(struct kmem_cache *s,
    struct page *page, void *object, unsigned long addr) { return 0; }

-static inline int free_debug_processing(struct kmem_cache *s,
-   struct page *page, void *object, unsigned long addr) { return 0; }
+static inline struct kmem_cache_node *free_debug_processing(
+   struct kmem_cache *s, struct page *page, void *object,
+   unsigned long addr, unsigned long *flags) { return NULL; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
            { return 1; }
...
@@ -1714,7 +1721,7 @@ static inline void note_cmpxchg_failure(const char *n,
    stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
}

-void init_kmem_cache_cpus(struct kmem_cache *s)
+static void init_kmem_cache_cpus(struct kmem_cache *s)
{
    int cpu;
...
@@ -1939,7 +1946,7 @@ static void unfreeze_partials(struct kmem_cache *s)
 * If we did not find a slot then simply move all the partials to the
 * per node partial list.
 */
-int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
{
    struct page *oldpage;
    int pages;
...
@@ -1962,6 +1969,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                local_irq_save(flags);
                unfreeze_partials(s);
                local_irq_restore(flags);
+               oldpage = NULL;
                pobjects = 0;
                pages = 0;
                stat(s, CPU_PARTIAL_DRAIN);
...
@@ -2310,7 +2318,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 *
 * Otherwise we can simply pick the next object from the lockless free list.
 */
-static __always_inline void *slab_alloc(struct kmem_cache *s,
+static __always_inline void *slab_alloc_node(struct kmem_cache *s,
        gfp_t gfpflags, int node, unsigned long addr)
{
    void **object;
...
@@ -2380,9 +2388,15 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
    return object;
}

+static __always_inline void *slab_alloc(struct kmem_cache *s,
+       gfp_t gfpflags, unsigned long addr)
+{
+   return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
+}
+
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
-   void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+   void *ret = slab_alloc(s, gfpflags, _RET_IP_);

    trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
                s->size, gfpflags);
...
@@ -2393,7 +2407,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
#ifdef CONFIG_TRACING
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
-   void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+   void *ret = slab_alloc(s, gfpflags, _RET_IP_);
    trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
    return ret;
}
...
@@ -2411,7 +2425,7 @@ EXPORT_SYMBOL(kmalloc_order_trace);
#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
-   void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+   void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);

    trace_kmem_cache_alloc_node(_RET_IP_, ret,
                    s->object_size, s->size, gfpflags, node);
...
@@ -2425,7 +2439,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
                    gfp_t gfpflags,
                    int node, size_t size)
{
-   void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+   void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);

    trace_kmalloc_node(_RET_IP_, ret,
               size, s->size, gfpflags, node);
...
@@ -2457,7 +2471,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
    stat(s, FREE_SLOWPATH);

-   if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
+   if (kmem_cache_debug(s) &&
+       !(n = free_debug_processing(s, page, x, addr, &flags)))
        return;

    do {
...
@@ -3362,7 +3377,7 @@ void *__kmalloc(size_t size, gfp_t flags)
    if (unlikely(ZERO_OR_NULL_PTR(s)))
        return s;

-   ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
+   ret = slab_alloc(s, flags, _RET_IP_);

    trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
...
@@ -3405,7 +3420,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
    if (unlikely(ZERO_OR_NULL_PTR(s)))
        return s;

-   ret = slab_alloc(s, flags, node, _RET_IP_);
+   ret = slab_alloc_node(s, flags, node, _RET_IP_);

    trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
...
@@ -3482,7 +3497,7 @@ void kfree(const void *x)
    if (unlikely(!PageSlab(page))) {
        BUG_ON(!PageCompound(page));
        kmemleak_free(x);
-       put_page(page);
+       __free_pages(page, compound_order(page));
        return;
    }
    slab_free(page->slab, page, object, _RET_IP_);
...
@@ -4033,7 +4048,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
    if (unlikely(ZERO_OR_NULL_PTR(s)))
        return s;

-   ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
+   ret = slab_alloc(s, gfpflags, caller);

    /* Honor the call site pointer we received. */
    trace_kmalloc(caller, ret, size, s->size, gfpflags);
...
@@ -4063,7 +4078,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
    if (unlikely(ZERO_OR_NULL_PTR(s)))
        return s;

-   ret = slab_alloc(s, gfpflags, node, caller);
+   ret = slab_alloc_node(s, gfpflags, node, caller);

    /* Honor the call site pointer we received. */
    trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
...
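After the rename, every SLUB entry point without a node preference goes through the two-line slab_alloc() wrapper rather than passing NUMA_NO_NODE at each call site. The same "node-less wrapper over a node-aware core" idiom, restated as a standalone sketch with illustrative names:

    #include <linux/numa.h>
    #include <linux/slab.h>

    /* Node-aware core: stand-in for slab_alloc_node() in this sketch. */
    static void *core_alloc_node(size_t size, gfp_t flags, int node)
    {
        return kmalloc_node(size, flags, node);
    }

    /* Node-less convenience wrapper, mirroring slab_alloc(); in the real
     * code this is __always_inline so it costs nothing. */
    static inline void *core_alloc(size_t size, gfp_t flags)
    {
        return core_alloc_node(size, flags, NUMA_NO_NODE);
    }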
mm/util.c
...
@@ -105,6 +105,25 @@ void *memdup_user(const void __user *src, size_t len)
}
EXPORT_SYMBOL(memdup_user);

+static __always_inline void *__do_krealloc(const void *p, size_t new_size,
+                      gfp_t flags)
+{
+   void *ret;
+   size_t ks = 0;
+
+   if (p)
+       ks = ksize(p);
+
+   if (ks >= new_size)
+       return (void *)p;
+
+   ret = kmalloc_track_caller(new_size, flags);
+   if (ret && p)
+       memcpy(ret, p, ks);
+
+   return ret;
+}
+
/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
...
@@ -117,23 +136,11 @@ EXPORT_SYMBOL(memdup_user);
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
-   void *ret;
-   size_t ks = 0;
-
    if (unlikely(!new_size))
        return ZERO_SIZE_PTR;

-   if (p)
-       ks = ksize(p);
+   return __do_krealloc(p, new_size, flags);

-   if (ks >= new_size)
-       return (void *)p;
-
-   ret = kmalloc_track_caller(new_size, flags);
-   if (ret && p)
-       memcpy(ret, p, ks);
-
-   return ret;
}
EXPORT_SYMBOL(__krealloc);
...
@@ -157,7 +164,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
        return ZERO_SIZE_PTR;
    }

-   ret = __krealloc(p, new_size, flags);
+   ret = __do_krealloc(p, new_size, flags);
    if (ret && p != ret)
        kfree(p);
...
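After this change __krealloc() and krealloc() share __do_krealloc(); the behavioural difference that remains is only who frees the old buffer: krealloc() calls kfree(p) when the data moved, __krealloc() never frees @p. A usage sketch of the common grow-or-keep case (the helper is made up):

    #include <linux/slab.h>

    /* Grow 'buf' to at least 'need' bytes. On success the returned pointer
     * replaces 'buf' (krealloc() already freed the old block if it moved);
     * on failure the original 'buf' is untouched and still owned by us. */
    static void *grow_buffer(void *buf, size_t need, gfp_t gfp)
    {
        void *bigger = krealloc(buf, need, gfp);

        return bigger ? bigger : buf;
    }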