Commit ab58a5a1 authored by Xiubo Li's avatar Xiubo Li Committed by Ilya Dryomov

ceph: move to a dedicated slabcache for ceph_cap_snap

There could be a huge number of capsnaps around at any given time. On
x86_64 the structure is 248 bytes, which will be rounded up to 256 bytes
by kzalloc. Move this to a dedicated slabcache to save 8 bytes for each.

[ jlayton: use kmem_cache_zalloc ]
Signed-off-by: Xiubo Li <xiubli@redhat.com>
Signed-off-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
parent 6ddf5f16
...@@ -482,7 +482,7 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci) ...@@ -482,7 +482,7 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci)
struct ceph_buffer *old_blob = NULL; struct ceph_buffer *old_blob = NULL;
int used, dirty; int used, dirty;
capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS); capsnap = kmem_cache_zalloc(ceph_cap_snap_cachep, GFP_NOFS);
if (!capsnap) { if (!capsnap) {
pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode); pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
return; return;
...@@ -603,7 +603,8 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci) ...@@ -603,7 +603,8 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci)
spin_unlock(&ci->i_ceph_lock); spin_unlock(&ci->i_ceph_lock);
ceph_buffer_put(old_blob); ceph_buffer_put(old_blob);
kfree(capsnap); if (capsnap)
kmem_cache_free(ceph_cap_snap_cachep, capsnap);
ceph_put_snap_context(old_snapc); ceph_put_snap_context(old_snapc);
} }
......
...@@ -864,6 +864,7 @@ static void destroy_fs_client(struct ceph_fs_client *fsc) ...@@ -864,6 +864,7 @@ static void destroy_fs_client(struct ceph_fs_client *fsc)
*/ */
struct kmem_cache *ceph_inode_cachep; struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep; struct kmem_cache *ceph_cap_cachep;
struct kmem_cache *ceph_cap_snap_cachep;
struct kmem_cache *ceph_cap_flush_cachep; struct kmem_cache *ceph_cap_flush_cachep;
struct kmem_cache *ceph_dentry_cachep; struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep; struct kmem_cache *ceph_file_cachep;
...@@ -892,6 +893,9 @@ static int __init init_caches(void) ...@@ -892,6 +893,9 @@ static int __init init_caches(void)
ceph_cap_cachep = KMEM_CACHE(ceph_cap, SLAB_MEM_SPREAD); ceph_cap_cachep = KMEM_CACHE(ceph_cap, SLAB_MEM_SPREAD);
if (!ceph_cap_cachep) if (!ceph_cap_cachep)
goto bad_cap; goto bad_cap;
ceph_cap_snap_cachep = KMEM_CACHE(ceph_cap_snap, SLAB_MEM_SPREAD);
if (!ceph_cap_snap_cachep)
goto bad_cap_snap;
ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush, ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
if (!ceph_cap_flush_cachep) if (!ceph_cap_flush_cachep)
...@@ -931,6 +935,8 @@ static int __init init_caches(void) ...@@ -931,6 +935,8 @@ static int __init init_caches(void)
bad_dentry: bad_dentry:
kmem_cache_destroy(ceph_cap_flush_cachep); kmem_cache_destroy(ceph_cap_flush_cachep);
bad_cap_flush: bad_cap_flush:
kmem_cache_destroy(ceph_cap_snap_cachep);
bad_cap_snap:
kmem_cache_destroy(ceph_cap_cachep); kmem_cache_destroy(ceph_cap_cachep);
bad_cap: bad_cap:
kmem_cache_destroy(ceph_inode_cachep); kmem_cache_destroy(ceph_inode_cachep);
...@@ -947,6 +953,7 @@ static void destroy_caches(void) ...@@ -947,6 +953,7 @@ static void destroy_caches(void)
kmem_cache_destroy(ceph_inode_cachep); kmem_cache_destroy(ceph_inode_cachep);
kmem_cache_destroy(ceph_cap_cachep); kmem_cache_destroy(ceph_cap_cachep);
kmem_cache_destroy(ceph_cap_snap_cachep);
kmem_cache_destroy(ceph_cap_flush_cachep); kmem_cache_destroy(ceph_cap_flush_cachep);
kmem_cache_destroy(ceph_dentry_cachep); kmem_cache_destroy(ceph_dentry_cachep);
kmem_cache_destroy(ceph_file_cachep); kmem_cache_destroy(ceph_file_cachep);
......
...@@ -230,7 +230,7 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap) ...@@ -230,7 +230,7 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
if (refcount_dec_and_test(&capsnap->nref)) { if (refcount_dec_and_test(&capsnap->nref)) {
if (capsnap->xattr_blob) if (capsnap->xattr_blob)
ceph_buffer_put(capsnap->xattr_blob); ceph_buffer_put(capsnap->xattr_blob);
kfree(capsnap); kmem_cache_free(ceph_cap_snap_cachep, capsnap);
} }
} }
......
...@@ -284,6 +284,7 @@ DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) ...@@ -284,6 +284,7 @@ DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld)
extern struct kmem_cache *ceph_inode_cachep; extern struct kmem_cache *ceph_inode_cachep;
extern struct kmem_cache *ceph_cap_cachep; extern struct kmem_cache *ceph_cap_cachep;
extern struct kmem_cache *ceph_cap_snap_cachep;
extern struct kmem_cache *ceph_cap_flush_cachep; extern struct kmem_cache *ceph_cap_flush_cachep;
extern struct kmem_cache *ceph_dentry_cachep; extern struct kmem_cache *ceph_dentry_cachep;
extern struct kmem_cache *ceph_file_cachep; extern struct kmem_cache *ceph_file_cachep;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment