Commit 4a4eee07 authored by John L. Hammond, committed by Greg Kroah-Hartman

staging/lustre/llite: rename ccc_lock to vvp_lock

Rename struct ccc_lock to struct vvp_lock and merge the CCC lock
methods into the VVP lock methods.
Signed-off-by: John L. Hammond <john.hammond@intel.com>
Reviewed-on: http://review.whamcloud.com/13088
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5971
Reviewed-by: James Simmons <uja.ornl@gmail.com>
Reviewed-by: Jinshan Xiong <jinshan.xiong@intel.com>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 3a52f803
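
The rename below is mechanical because VVP locks follow the client-lock slice pattern: each layer embeds a generic struct cl_lock_slice inside its own lock type and recovers the containing object with container_of(). The following is a minimal, self-contained userspace sketch of that pattern, not Lustre code; the container_of macro and everything outside the names taken from the patch (cl_lock_slice, vvp_lock, vlk_cl, cl2vvp_lock) is invented for illustration.

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cl_lock_slice {
            int cls_refs;                   /* stand-in for the generic state */
    };

    struct vvp_lock {
            struct cl_lock_slice vlk_cl;    /* generic slice embedded in the layer lock */
    };

    static struct vvp_lock *cl2vvp_lock(struct cl_lock_slice *slice)
    {
            return container_of(slice, struct vvp_lock, vlk_cl);
    }

    int main(void)
    {
            struct vvp_lock *vlk = calloc(1, sizeof(*vlk));
            struct cl_lock_slice *slice = &vlk->vlk_cl;

            /* given only the generic slice, recover the layer lock */
            printf("round trip ok: %d\n", cl2vvp_lock(slice) == vlk);
            free(vlk);
            return 0;
    }
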
@@ -1227,7 +1227,7 @@ struct cl_lock {
 /**
  * Per-layer part of cl_lock
  *
- * \see ccc_lock, lov_lock, lovsub_lock, osc_lock
+ * \see vvp_lock, lov_lock, lovsub_lock, osc_lock
  */
 struct cl_lock_slice {
 	struct cl_lock *cls_lock;
@@ -1254,7 +1254,7 @@ struct cl_lock_operations {
 	 * @anchor for resources
 	 * \retval -ve failure
 	 *
-	 * \see ccc_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
+	 * \see vvp_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
 	 * \see osc_lock_enqueue()
 	 */
 	int (*clo_enqueue)(const struct lu_env *env,
@@ -1270,7 +1270,7 @@ struct cl_lock_operations {
 	/**
 	 * Destructor. Frees resources and the slice.
 	 *
-	 * \see ccc_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
+	 * \see vvp_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
 	 * \see osc_lock_fini()
 	 */
 	void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
...
@@ -67,17 +67,11 @@ static const struct cl_req_operations ccc_req_ops;
  * ccc_ prefix stands for "Common Client Code".
  */
-static struct kmem_cache *ccc_lock_kmem;
 static struct kmem_cache *ccc_thread_kmem;
 static struct kmem_cache *ccc_session_kmem;
 static struct kmem_cache *ccc_req_kmem;
 
 static struct lu_kmem_descr ccc_caches[] = {
-	{
-		.ckd_cache = &ccc_lock_kmem,
-		.ckd_name  = "ccc_lock_kmem",
-		.ckd_size  = sizeof(struct ccc_lock)
-	},
 	{
 		.ckd_cache = &ccc_thread_kmem,
 		.ckd_name  = "ccc_thread_kmem",
@@ -221,26 +215,6 @@ void ccc_global_fini(struct lu_device_type *device_type)
 	lu_kmem_fini(ccc_caches);
 }
 
-int ccc_lock_init(const struct lu_env *env,
-		  struct cl_object *obj, struct cl_lock *lock,
-		  const struct cl_io *unused,
-		  const struct cl_lock_operations *lkops)
-{
-	struct ccc_lock *clk;
-	int result;
-
-	CLOBINVRNT(env, obj, vvp_object_invariant(obj));
-
-	clk = kmem_cache_zalloc(ccc_lock_kmem, GFP_NOFS);
-	if (clk) {
-		cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
-		result = 0;
-	} else {
-		result = -ENOMEM;
-	}
-	return result;
-}
-
 static void vvp_object_size_lock(struct cl_object *obj)
 {
 	struct inode *inode = vvp_object_inode(obj);
@@ -257,27 +231,6 @@ static void vvp_object_size_unlock(struct cl_object *obj)
 	ll_inode_size_unlock(inode);
 }
 
-/*****************************************************************************
- *
- * Lock operations.
- *
- */
-
-void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
-{
-	struct ccc_lock *clk = cl2ccc_lock(slice);
-
-	kmem_cache_free(ccc_lock_kmem, clk);
-}
-
-int ccc_lock_enqueue(const struct lu_env *env,
-		     const struct cl_lock_slice *slice,
-		     struct cl_io *unused, struct cl_sync_io *anchor)
-{
-	CLOBINVRNT(env, slice->cls_obj, vvp_object_invariant(slice->cls_obj));
-	return 0;
-}
-
 /*****************************************************************************
  *
  * io operations.
@@ -571,11 +524,6 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
  *
  */
 
-struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice)
-{
-	return container_of(slice, struct ccc_lock, clk_cl);
-}
-
 struct ccc_io *cl2ccc_io(const struct lu_env *env,
 			 const struct cl_io_slice *slice)
 {
...
@@ -57,10 +57,16 @@
  * "llite_" (var. "ll_") prefix.
  */
 
+struct kmem_cache *vvp_lock_kmem;
 struct kmem_cache *vvp_object_kmem;
 static struct kmem_cache *vvp_thread_kmem;
 static struct kmem_cache *vvp_session_kmem;
 
 static struct lu_kmem_descr vvp_caches[] = {
+	{
+		.ckd_cache = &vvp_lock_kmem,
+		.ckd_name  = "vvp_lock_kmem",
+		.ckd_size  = sizeof(struct vvp_lock),
+	},
 	{
 		.ckd_cache = &vvp_object_kmem,
 		.ckd_name  = "vvp_object_kmem",
...
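
For context on the hunk above: a lu_kmem_descr table such as vvp_caches is turned into real slab caches in a single call, so moving the lock cache from the CCC to the VVP side only requires adding one array entry. Below is a hedged sketch of the registration path using the lu_kmem_init()/lu_kmem_fini() helpers this diff already calls; the function names vvp_global_init()/vvp_global_fini() mirror the existing ccc_global_* pair but are an assumption here, not part of this patch.

    /* Sketch only: lu_kmem_init() walks the table and creates every cache
     * in it, including the new vvp_lock_kmem; a sentinel entry with
     * .ckd_cache == NULL terminates the array. */
    int vvp_global_init(void)
    {
    	int result;
    
    	result = lu_kmem_init(vvp_caches);
    	if (result != 0)
    		return result;	/* slab creation failed; nothing to undo */
    	return 0;
    }
    
    void vvp_global_fini(void)
    {
    	lu_kmem_fini(vvp_caches);	/* destroys every cache in the table */
    }
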
@@ -128,6 +128,7 @@ int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
 extern struct lu_context_key ccc_key;
 extern struct lu_context_key ccc_session_key;
 
+extern struct kmem_cache *vvp_lock_kmem;
 extern struct kmem_cache *vvp_object_kmem;
 
 struct ccc_thread_info {
@@ -269,8 +270,8 @@ struct vvp_device {
 	struct cl_device *vdv_next;
 };
 
-struct ccc_lock {
-	struct cl_lock_slice clk_cl;
+struct vvp_lock {
+	struct cl_lock_slice vlk_cl;
 };
 
 struct ccc_req {
@@ -291,15 +292,6 @@ int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
 void ccc_umount(const struct lu_env *env, struct cl_device *dev);
 int ccc_global_init(struct lu_device_type *device_type);
 void ccc_global_fini(struct lu_device_type *device_type);
-int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
-		  struct cl_lock *lock, const struct cl_io *io,
-		  const struct cl_lock_operations *lkops);
-void ccc_lock_delete(const struct lu_env *env,
-		     const struct cl_lock_slice *slice);
-void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
-int ccc_lock_enqueue(const struct lu_env *env,
-		     const struct cl_lock_slice *slice,
-		     struct cl_io *io, struct cl_sync_io *anchor);
 
 int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
 			  __u32 enqflags, enum cl_lock_mode mode,
@@ -359,7 +351,11 @@ static inline struct page *cl2vm_page(const struct cl_page_slice *slice)
 	return cl2vvp_page(slice)->vpg_page;
 }
 
-struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice);
+static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
+{
+	return container_of(slice, struct vvp_lock, vlk_cl);
+}
+
 struct ccc_io *cl2ccc_io(const struct lu_env *env,
 			 const struct cl_io_slice *slice);
 struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice);
...
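
The vvp_lock_fini() and vvp_lock_enqueue() methods introduced in the next file are never called directly: the generic cl_lock code keeps the per-layer slices on a list and dispatches through each slice's cl_lock_operations vector. Roughly, and with the caveat that this is a paraphrase of the generic teardown path in cl_lock.c rather than code from this patch, the fini dispatch looks like:

    /* Paraphrased dispatch loop (assumption: modeled on the generic
     * cl_lock_fini(), not part of this diff).  Each layer's clo_fini,
     * e.g. vvp_lock_fini(), frees its own slice. */
    while (!list_empty(&lock->cll_layers)) {
    	struct cl_lock_slice *slice;
    
    	slice = list_first_entry(&lock->cll_layers,
    				 struct cl_lock_slice, cls_linkage);
    	list_del_init(&slice->cls_linkage);
    	slice->cls_ops->clo_fini(env, slice);
    }
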
@@ -40,7 +40,7 @@
 #define DEBUG_SUBSYSTEM S_LLITE
 
-#include "../include/obd.h"
+#include "../include/obd_support.h"
 #include "../include/lustre_lite.h"
 
 #include "vvp_internal.h"
@@ -51,13 +51,41 @@
  *
  */
 
+static void vvp_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
+{
+	struct vvp_lock *vlk = cl2vvp_lock(slice);
+
+	kmem_cache_free(vvp_lock_kmem, vlk);
+}
+
+static int vvp_lock_enqueue(const struct lu_env *env,
+			    const struct cl_lock_slice *slice,
+			    struct cl_io *unused, struct cl_sync_io *anchor)
+{
+	CLOBINVRNT(env, slice->cls_obj, vvp_object_invariant(slice->cls_obj));
+
+	return 0;
+}
+
 static const struct cl_lock_operations vvp_lock_ops = {
-	.clo_fini = ccc_lock_fini,
-	.clo_enqueue = ccc_lock_enqueue
+	.clo_fini = vvp_lock_fini,
+	.clo_enqueue = vvp_lock_enqueue,
 };
 
 int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
-		  struct cl_lock *lock, const struct cl_io *io)
+		  struct cl_lock *lock, const struct cl_io *unused)
 {
-	return ccc_lock_init(env, obj, lock, io, &vvp_lock_ops);
+	struct vvp_lock *vlk;
+	int result;
+
+	CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+	vlk = kmem_cache_zalloc(vvp_lock_kmem, GFP_NOFS);
+	if (vlk) {
+		cl_lock_slice_add(lock, &vlk->vlk_cl, obj, &vvp_lock_ops);
+		result = 0;
+	} else {
+		result = -ENOMEM;
+	}
+	return result;
 }
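
Finally, vvp_lock_init() itself is reached through the cl_object layer, which invokes each layer's lock-init hook when a cl_lock is created. A hedged sketch of that wiring; the operations table lives in vvp_object.c and is reproduced here from memory as an assumption, not as part of this diff:

    /* Assumed wiring (not part of this patch): the cl_object layer calls
     * coo_lock_init for each layer, which is how vvp_lock_init() gets to
     * allocate a vvp_lock and add its slice. */
    static const struct cl_object_operations vvp_ops = {
    	.coo_lock_init = vvp_lock_init,
    	/* other methods (page init, io init, attr get/set, ...) elided */
    };
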