Commit a45b0616, authored Jan 09, 2011 by Pekka Enberg

    Merge branch 'slab/next' into for-linus

Parents: 3c0eee3f, 8165984a

Showing 6 changed files with 89 additions and 75 deletions.
Documentation/vm/Makefile    +1  -1
include/linux/slab_def.h     +13 -20
include/linux/slub_def.h     +26 -29
mm/slab.c                    +23 -15
mm/slub.c                    +23 -7
tools/slub/slabinfo.c        +3  -3
Documentation/vm/Makefile

@@ -2,7 +2,7 @@
 obj- := dummy.o

 # List of programs to build
-hostprogs-y := slabinfo page-types hugepage-mmap hugepage-shm map_hugetlb
+hostprogs-y := page-types hugepage-mmap hugepage-shm map_hugetlb

 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
include/linux/slab_def.h

@@ -138,11 +138,12 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);

 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern void *kmem_cache_alloc_trace(size_t size,
+				    struct kmem_cache *cachep, gfp_t flags);
 extern size_t slab_buffer_size(struct kmem_cache *cachep);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
 	return kmem_cache_alloc(cachep, flags);
 }

@@ -179,10 +180,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 #endif
 	cachep = malloc_sizes[i].cs_cachep;

-	ret = kmem_cache_alloc_notrace(cachep, flags);
-
-	trace_kmalloc(_THIS_IP_, ret,
-		      size, slab_buffer_size(cachep), flags);
+	ret = kmem_cache_alloc_trace(size, cachep, flags);

 	return ret;
 }

@@ -194,14 +192,16 @@ extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-					   gfp_t flags,
-					   int nodeid);
+extern void *kmem_cache_alloc_node_trace(size_t size,
+					 struct kmem_cache *cachep,
+					 gfp_t flags,
+					 int nodeid);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-			      gfp_t flags,
-			      int nodeid)
+kmem_cache_alloc_node_trace(size_t size,
+			    struct kmem_cache *cachep,
+			    gfp_t flags,
+			    int nodeid)
 {
 	return kmem_cache_alloc_node(cachep, flags, nodeid);
 }

@@ -210,7 +210,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *cachep;
-	void *ret;

 	if (__builtin_constant_p(size)) {
 		int i = 0;

@@ -234,13 +233,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 #endif
 	cachep = malloc_sizes[i].cs_cachep;

-	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-	trace_kmalloc_node(_THIS_IP_, ret,
-			   size, slab_buffer_size(cachep),
-			   flags, node);
-
-	return ret;
+	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 	}
 	return __kmalloc_node(size, flags, node);
 }
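Aside: the shape of this change, repeated for the node variant above and again in slub_def.h below, is that an always-inlined kmalloc() fast path used to expand both the allocation call and the trace_kmalloc() tracepoint at every call site; afterwards the tracepoint fires once, inside the single out-of-line *_trace() function, so each inlined call site shrinks to one call. A minimal userspace sketch of that pattern (my_kmalloc, alloc_trace, alloc_backend, and trace_event are illustrative stand-ins, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the trace_kmalloc() tracepoint. */
static void trace_event(const char *what, size_t size, void *ret)
{
	printf("%s: %zu bytes -> %p\n", what, size, ret);
}

/* Stand-in for the real allocator backend (__cache_alloc() etc.). */
static void *alloc_backend(size_t size)
{
	return malloc(size);
}

/* After the change: allocation and tracepoint live together,
 * out of line, in exactly one place. */
void *alloc_trace(size_t size)
{
	void *ret = alloc_backend(size);

	trace_event("alloc", size, ret);
	return ret;
}

/* The always-inlined wrapper now expands to a single function call
 * per call site; before, every expansion also carried the tracepoint
 * and its argument setup. */
static inline __attribute__((always_inline)) void *my_kmalloc(size_t size)
{
	return alloc_trace(size);
}

int main(void)
{
	free(my_kmalloc(128));
	return 0;
}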
include/linux/slub_def.h

@@ -10,9 +10,8 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemleak.h>

-#include <trace/events/kmem.h>
+#include <linux/kmemleak.h>

 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */

@@ -216,31 +215,40 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);

+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+	kmemleak_alloc(ret, size, 1, flags);
+	return ret;
+}
+
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+extern void *
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	return kmem_cache_alloc(s, gfpflags);
 }
+
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	return kmalloc_order(size, flags, order);
+}
 #endif

 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
 	unsigned int order = get_order(size);
-	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
-
-	kmemleak_alloc(ret, size, 1, flags);
-	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
-
-	return ret;
+	return kmalloc_order_trace(size, flags, order);
 }

 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-	void *ret;
-
 	if (__builtin_constant_p(size)) {
 		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);

@@ -251,11 +259,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 			if (!s)
 				return ZERO_SIZE_PTR;

-			ret = kmem_cache_alloc_notrace(s, flags);
-
-			trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
-
-			return ret;
+			return kmem_cache_alloc_trace(s, flags, size);
 		}
 	}
 	return __kmalloc(size, flags);

@@ -266,14 +270,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 					   gfp_t gfpflags,
-					   int node);
+					   int node, size_t size);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
 			      gfp_t gfpflags,
-			      int node)
+			      int node, size_t size)
 {
 	return kmem_cache_alloc_node(s, gfpflags, node);
 }

@@ -281,8 +285,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *s,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	void *ret;
-
 	if (__builtin_constant_p(size) &&
 		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);

@@ -290,12 +292,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 		if (!s)
 			return ZERO_SIZE_PTR;

-		ret = kmem_cache_alloc_node_notrace(s, flags, node);
-
-		trace_kmalloc_node(_THIS_IP_, ret,
-				   size, s->size, flags, node);
-
-		return ret;
+		return kmem_cache_alloc_node_trace(s, flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
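In the SLUB path, requests above SLUB_MAX_SIZE bypass the slab caches entirely: kmalloc_large() computes a page order inline and now defers both the allocation and the tracepoint to the out-of-line kmalloc_order_trace(). A userspace sketch of the order computation it depends on (this get_order() is a stand-in mirroring the kernel helper's contract for size > 0, assuming 4 KiB pages):

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest order such that PAGE_SIZE << order covers size,
 * mirroring what the kernel's get_order() returns for size > 0. */
static unsigned int get_order(size_t size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	size_t sizes[] = { 4096, 8192, 10000, 70000 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %zu -> order %u (%lu bytes)\n",
		       sizes[i], get_order(sizes[i]),
		       PAGE_SIZE << get_order(sizes[i]));
	return 0;
}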
mm/slab.c

@@ -3653,11 +3653,18 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);

 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+void *
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret;
+
+	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	trace_kmalloc(_RET_IP_, ret,
+		      size, slab_buffer_size(cachep), flags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif

 /**

@@ -3705,31 +3712,32 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 EXPORT_SYMBOL(kmem_cache_alloc_node);

 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-				    gfp_t flags,
-				    int nodeid)
+void *kmem_cache_alloc_node_trace(size_t size,
+				  struct kmem_cache *cachep,
+				  gfp_t flags,
+				  int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
-			__builtin_return_address(0));
+	void *ret;
+
+	ret = __cache_alloc_node(cachep, flags, nodeid,
+				  __builtin_return_address(0));
+
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, slab_buffer_size(cachep),
+			   flags, nodeid);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif

 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
-	void *ret;

 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-	trace_kmalloc_node((unsigned long) caller, ret,
-			   size, cachep->buffer_size,
-			   flags, node);
-
-	return ret;
+	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 }

 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
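One subtlety in these mm/slab.c hunks: the removed inline callers traced with _THIS_IP_, the address of the inlined expansion itself, while the new out-of-line functions use _RET_IP_, which the kernel defines as (unsigned long)__builtin_return_address(0). Because the function is no longer inlined, its return address is what identifies the allocation site. A small userspace illustration of the underlying builtin (traced_alloc is a made-up name):

#include <stdio.h>

/* Out of line on purpose: its return address points into the caller,
 * just as _RET_IP_ does inside kmem_cache_alloc_trace(). */
__attribute__((noinline)) static void traced_alloc(void)
{
	printf("called from %p\n", __builtin_return_address(0));
}

int main(void)
{
	traced_alloc();		/* prints an address inside main() */
	return 0;
}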
mm/slub.c

@@ -28,6 +28,8 @@
 #include <linux/math64.h>
 #include <linux/fault-inject.h>

+#include <trace/events/kmem.h>
+
 /*
  * Lock order:
  *   1. slab_lock(page)

@@ -1774,11 +1776,21 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 EXPORT_SYMBOL(kmem_cache_alloc);

 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
+
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret = kmalloc_order(size, flags, order);
+	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_order_trace);
 #endif

 #ifdef CONFIG_NUMA

@@ -1794,13 +1806,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 EXPORT_SYMBOL(kmem_cache_alloc_node);

 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 				    gfp_t gfpflags,
-				    int node)
+				    int node, size_t size)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, s->size, gfpflags, node);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 #endif
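The net effect of the mm/slub.c hunks is that the kmem tracepoints still fire on traced builds; they are simply emitted from these exported *_trace() functions instead of from every inlined call site, which is also why trace/events/kmem.h now needs including here rather than in the header. One hedged way to watch the events at runtime (paths assume CONFIG_TRACING is enabled and debugfs is mounted at /sys/kernel/debug):

echo 1 > /sys/kernel/debug/tracing/events/kmem/kmalloc/enable
cat /sys/kernel/debug/tracing/trace_pipe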
Documentation/vm/slabinfo.c → tools/slub/slabinfo.c

The three changed line pairs below differ only in whitespace; the page was rendered with whitespace changes hidden, so each old and new line displays identically.

@@ -607,7 +607,7 @@ static int debug_opt_scan(char *opt)
 	}

 	for ( ; *opt; opt++)
-		switch (*opt) {
+		switch (*opt) {
 		case 'F' : case 'f':
 			if (sanity)
 				return 0;

@@ -1127,7 +1127,7 @@ static void read_slab_dir(void)
 			continue;
 		switch (de->d_type) {
 		case DT_LNK:
-			alias->name = strdup(de->d_name);
+			alias->name = strdup(de->d_name);
 			count = readlink(de->d_name, buffer, sizeof(buffer));

 			if (count < 0)

@@ -1143,7 +1143,7 @@ static void read_slab_dir(void)
 		case DT_DIR:
 			if (chdir(de->d_name))
 				fatal("Unable to access slab %s\n", slab->name);
-			slab->name = strdup(de->d_name);
+			slab->name = strdup(de->d_name);
 			slab->alias = 0;
 			slab->refs = 0;
 			slab->aliases = get_obj("aliases");
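The rename also explains the Makefile hunk at the top: slabinfo is a plain userspace tool, and after the move it is no longer built by the Documentation makefile. Presumably (the commit itself does not say) it can still be compiled by hand with something like:

cc -o slabinfo tools/slub/slabinfo.c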