nexedi / linux / Commits

Commit 355d79c8
Authored Dec 12, 2009 by Pekka Enberg
Merge branches 'slab/fixes', 'slab/kmemleak', 'slub/perf' and 'slub/stats' into for-linus
Parents: 053fe57a 8e15b79c ddbf2e83 74e2134f 78eb00cc
Showing 3 changed files with 145 additions and 102 deletions.
Documentation/ABI/testing/sysfs-kernel-slab   +58  -51
mm/slab.c                                     +69  -49
mm/slub.c                                     +18  -2
Documentation/ABI/testing/sysfs-kernel-slab
(This diff is collapsed and not shown here.)
mm/slab.c
@@ -604,6 +604,26 @@ static struct kmem_cache cache_cache = {
 #define BAD_ALIEN_MAGIC 0x01020304ul
 
+/*
+ * chicken and egg problem: delay the per-cpu array allocation
+ * until the general caches are up.
+ */
+static enum {
+	NONE,
+	PARTIAL_AC,
+	PARTIAL_L3,
+	EARLY,
+	FULL
+} g_cpucache_up;
+
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+	return g_cpucache_up >= EARLY;
+}
+
 #ifdef CONFIG_LOCKDEP
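This hunk moves the g_cpucache_up bootstrap state and slab_is_available() above the CONFIG_LOCKDEP block (the old copy is removed by the -665,26 hunk further down), apparently so that the new init_node_lock_keys() can test g_cpucache_up. slab_is_available() is what early boot code checks before relying on kmalloc(). A minimal illustrative sketch of such a caller, not part of this commit; the helper and buffer names are hypothetical:

#include <linux/slab.h>

static char early_scratch[128];	/* hypothetical static fallback buffer */

/* Hypothetical early-boot helper: use kmalloc() only once the slab
 * allocator reports itself available, otherwise hand out static storage. */
static void *early_get_buffer(size_t len)
{
	if (slab_is_available())
		return kmalloc(len, GFP_KERNEL);
	return len <= sizeof(early_scratch) ? early_scratch : NULL;
}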
@@ -620,40 +640,52 @@ static struct kmem_cache cache_cache = {
 static struct lock_class_key on_slab_l3_key;
 static struct lock_class_key on_slab_alc_key;
 
-static inline void init_lock_keys(void)
-
+static void init_node_lock_keys(int q)
 {
-	int q;
 	struct cache_sizes *s = malloc_sizes;
 
-	while (s->cs_size != ULONG_MAX) {
-		for_each_node(q) {
-			struct array_cache **alc;
-			int r;
-			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
-			if (!l3 || OFF_SLAB(s->cs_cachep))
-				continue;
-			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
-			alc = l3->alien;
-			/*
-			 * FIXME: This check for BAD_ALIEN_MAGIC
-			 * should go away when common slab code is taught to
-			 * work even without alien caches.
-			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-			 * for alloc_alien_cache,
-			 */
-			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-				continue;
-			for_each_node(r) {
-				if (alc[r])
-					lockdep_set_class(&alc[r]->lock,
-					     &on_slab_alc_key);
-			}
+	if (g_cpucache_up != FULL)
+		return;
+
+	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
+		struct array_cache **alc;
+		struct kmem_list3 *l3;
+		int r;
+
+		l3 = s->cs_cachep->nodelists[q];
+		if (!l3 || OFF_SLAB(s->cs_cachep))
+			return;
+		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+		alc = l3->alien;
+		/*
+		 * FIXME: This check for BAD_ALIEN_MAGIC
+		 * should go away when common slab code is taught to
+		 * work even without alien caches.
+		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+		 * for alloc_alien_cache,
+		 */
+		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+			return;
+		for_each_node(r) {
+			if (alc[r])
+				lockdep_set_class(&alc[r]->lock,
+				     &on_slab_alc_key);
 		}
-		s++;
 	}
 }
+
+static inline void init_lock_keys(void)
+{
+	int node;
+
+	for_each_node(node)
+		init_node_lock_keys(node);
+}
+
 #else
+static void init_node_lock_keys(int q)
+{
+}
+
 static inline void init_lock_keys(void)
 {
 }
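The two file-scope lock_class_key objects at the top of this hunk are the point of the annotation: every on-slab l3 list_lock goes into one shared lockdep class and every alien array_cache lock into another, and splitting the work out into init_node_lock_keys() lets the annotation be re-applied per node from the CPU hotplug path (see the cpuup_prepare() hunk below). A minimal sketch of the general lockdep_set_class() pattern, with hypothetical names, not taken from this commit:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

static struct lock_class_key demo_node_lock_key;	/* one key == one lockdep class */

struct demo_node {
	spinlock_t lock;
};

static void demo_node_init(struct demo_node *n)
{
	spin_lock_init(&n->lock);
	/* every demo_node lock now shares one class, so lockdep validates
	 * nesting per class rather than per lock instance */
	lockdep_set_class(&n->lock, &demo_node_lock_key);
}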
@@ -665,26 +697,6 @@ static inline void init_lock_keys(void)
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
-	NONE,
-	PARTIAL_AC,
-	PARTIAL_L3,
-	EARLY,
-	FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
-	return g_cpucache_up >= EARLY;
-}
-
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1254,6 +1266,8 @@ static int __cpuinit cpuup_prepare(long cpu)
 		kfree(shared);
 		free_alien_cache(alien);
 	}
+	init_node_lock_keys(node);
+
 	return 0;
 bad:
 	cpuup_canceled(cpu);
@@ -3103,13 +3117,19 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	} else {
 		STATS_INC_ALLOCMISS(cachep);
 		objp = cache_alloc_refill(cachep, flags);
+		/*
+		 * the 'ac' may be updated by cache_alloc_refill(),
+		 * and kmemleak_erase() requires its correct value.
+		 */
+		ac = cpu_cache_get(cachep);
 	}
 	/*
 	 * To avoid a false negative, if an object that is in one of the
 	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
 	 * treat the array pointers as a reference to the object.
	 */
-	kmemleak_erase(&ac->entry[ac->avail]);
+	if (objp)
+		kmemleak_erase(&ac->entry[ac->avail]);
 	return objp;
 }
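For reference, kmemleak_erase() only clears the per-CPU entry slot so that kmemleak stops treating the array as holding a reference to the object just handed out; the helper in include/linux/kmemleak.h is roughly:

static inline void kmemleak_erase(void **ptr)
{
	*ptr = NULL;	/* drop the stale pointer so kmemleak ignores it */
}

That is why the hunk re-reads ac after cache_alloc_refill() (the per-CPU array may have been replaced) and now only erases when an object was actually returned.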
@@ -3306,7 +3326,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
-	if (unlikely(nodeid == -1))
+	if (nodeid == -1)
 		nodeid = numa_node_id();
 
 	if (unlikely(!cachep->nodelists[nodeid])) {
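This hunk and the first mm/slub.c hunk below only adjust branch-prediction annotations; unlikely() is a hint to the compiler and changes no behaviour. From include/linux/compiler.h (approximately):

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

Here the hint is dropped, presumably because nodeid == -1 is in fact a common case; in mm/slub.c the "&& object" test is moved outside the hint so that only the __GFP_ZERO check is marked unlikely.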
mm/slub.c
@@ -1735,7 +1735,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	}
 	local_irq_restore(flags);
 
-	if (unlikely((gfpflags & __GFP_ZERO) && object))
+	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, objsize);
 
 	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
@@ -4371,12 +4371,28 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
 	return len + sprintf(buf + len, "\n");
 }
 
+static void clear_stat(struct kmem_cache *s, enum stat_item si)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		get_cpu_slab(s, cpu)->stat[si] = 0;
+}
+
 #define STAT_ATTR(si, text) 					\
 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
 {								\
 	return show_stat(s, buf, si);				\
 }								\
-SLAB_ATTR_RO(text);
+SLAB_ATTR_RO(text);						\
+								\
+static ssize_t text##_store(struct kmem_cache *s,		\
+				const char *buf, size_t length)	\
+{								\
+	if (buf[0] != '0')					\
+		return -EINVAL;					\
+	clear_stat(s, si);					\
+	return length;						\
+}								\
+SLAB_ATTR(text);
 
 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
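With the new text##_store handler, each SLUB statistics attribute (CONFIG_SLUB_STATS) can be reset from user space by writing '0'; any other input returns -EINVAL. A hypothetical usage example, assuming a cache named kmalloc-64:

echo 0 > /sys/kernel/slab/kmalloc-64/alloc_fastpath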