Commit 10cdef73 authored by John L. Hammond's avatar John L. Hammond Committed by Greg Kroah-Hartman

staging/lustre/llite: merge ccc_io and vvp_io

Move the contents of struct vvp_io into struct ccc_io, delete the
former, and rename the latter to struct vvp_io. Rename various ccc_io
related functions to use vvp rather than ccc.
Signed-off-by: default avatarJohn L. Hammond <john.hammond@intel.com>
Reviewed-on: http://review.whamcloud.com/13351
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5971
Reviewed-by: default avatarLai Siyao <lai.siyao@intel.com>
Reviewed-by: default avatarJinshan Xiong <jinshan.xiong@intel.com>
Signed-off-by: default avatarOleg Drokin <green@linuxhacker.ru>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent bc4320a9
...@@ -1461,7 +1461,7 @@ enum cl_io_state { ...@@ -1461,7 +1461,7 @@ enum cl_io_state {
* This is usually embedded into layer session data, rather than allocated * This is usually embedded into layer session data, rather than allocated
* dynamically. * dynamically.
* *
* \see vvp_io, lov_io, osc_io, ccc_io * \see vvp_io, lov_io, osc_io
*/ */
struct cl_io_slice { struct cl_io_slice {
struct cl_io *cis_io; struct cl_io *cis_io;
......
...@@ -1135,14 +1135,13 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args, ...@@ -1135,14 +1135,13 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
ll_io_init(io, file, iot == CIT_WRITE); ll_io_init(io, file, iot == CIT_WRITE);
if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) { if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
struct vvp_io *vio = vvp_env_io(env); struct vvp_io *cio = vvp_env_io(env);
struct ccc_io *cio = ccc_env_io(env);
int write_mutex_locked = 0; int write_mutex_locked = 0;
cio->cui_fd = LUSTRE_FPRIVATE(file); cio->cui_fd = LUSTRE_FPRIVATE(file);
vio->cui_io_subtype = args->via_io_subtype; cio->cui_io_subtype = args->via_io_subtype;
switch (vio->cui_io_subtype) { switch (cio->cui_io_subtype) {
case IO_NORMAL: case IO_NORMAL:
cio->cui_iter = args->u.normal.via_iter; cio->cui_iter = args->u.normal.via_iter;
cio->cui_iocb = args->u.normal.via_iocb; cio->cui_iocb = args->u.normal.via_iocb;
...@@ -1158,11 +1157,11 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args, ...@@ -1158,11 +1157,11 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
down_read(&lli->lli_trunc_sem); down_read(&lli->lli_trunc_sem);
break; break;
case IO_SPLICE: case IO_SPLICE:
vio->u.splice.cui_pipe = args->u.splice.via_pipe; cio->u.splice.cui_pipe = args->u.splice.via_pipe;
vio->u.splice.cui_flags = args->u.splice.via_flags; cio->u.splice.cui_flags = args->u.splice.via_flags;
break; break;
default: default:
CERROR("Unknown IO type - %u\n", vio->cui_io_subtype); CERROR("Unknown IO type - %u\n", cio->cui_io_subtype);
LBUG(); LBUG();
} }
result = cl_io_loop(env, io); result = cl_io_loop(env, io);
......
...@@ -68,7 +68,6 @@ static const struct cl_req_operations ccc_req_ops; ...@@ -68,7 +68,6 @@ static const struct cl_req_operations ccc_req_ops;
*/ */
static struct kmem_cache *ccc_thread_kmem; static struct kmem_cache *ccc_thread_kmem;
static struct kmem_cache *ccc_session_kmem;
static struct kmem_cache *ccc_req_kmem; static struct kmem_cache *ccc_req_kmem;
static struct lu_kmem_descr ccc_caches[] = { static struct lu_kmem_descr ccc_caches[] = {
...@@ -77,11 +76,6 @@ static struct lu_kmem_descr ccc_caches[] = { ...@@ -77,11 +76,6 @@ static struct lu_kmem_descr ccc_caches[] = {
.ckd_name = "ccc_thread_kmem", .ckd_name = "ccc_thread_kmem",
.ckd_size = sizeof(struct ccc_thread_info), .ckd_size = sizeof(struct ccc_thread_info),
}, },
{
.ckd_cache = &ccc_session_kmem,
.ckd_name = "ccc_session_kmem",
.ckd_size = sizeof(struct ccc_session)
},
{ {
.ckd_cache = &ccc_req_kmem, .ckd_cache = &ccc_req_kmem,
.ckd_name = "ccc_req_kmem", .ckd_name = "ccc_req_kmem",
...@@ -116,37 +110,12 @@ void ccc_key_fini(const struct lu_context *ctx, ...@@ -116,37 +110,12 @@ void ccc_key_fini(const struct lu_context *ctx,
kmem_cache_free(ccc_thread_kmem, info); kmem_cache_free(ccc_thread_kmem, info);
} }
void *ccc_session_key_init(const struct lu_context *ctx,
struct lu_context_key *key)
{
struct ccc_session *session;
session = kmem_cache_zalloc(ccc_session_kmem, GFP_NOFS);
if (!session)
session = ERR_PTR(-ENOMEM);
return session;
}
void ccc_session_key_fini(const struct lu_context *ctx,
struct lu_context_key *key, void *data)
{
struct ccc_session *session = data;
kmem_cache_free(ccc_session_kmem, session);
}
struct lu_context_key ccc_key = { struct lu_context_key ccc_key = {
.lct_tags = LCT_CL_THREAD, .lct_tags = LCT_CL_THREAD,
.lct_init = ccc_key_init, .lct_init = ccc_key_init,
.lct_fini = ccc_key_fini .lct_fini = ccc_key_fini
}; };
struct lu_context_key ccc_session_key = {
.lct_tags = LCT_SESSION,
.lct_init = ccc_session_key_init,
.lct_fini = ccc_session_key_fini
};
int ccc_req_init(const struct lu_env *env, struct cl_device *dev, int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
struct cl_req *req) struct cl_req *req)
{ {
...@@ -237,11 +206,11 @@ static void vvp_object_size_unlock(struct cl_object *obj) ...@@ -237,11 +206,11 @@ static void vvp_object_size_unlock(struct cl_object *obj)
* *
*/ */
int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io, int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
__u32 enqflags, enum cl_lock_mode mode, __u32 enqflags, enum cl_lock_mode mode,
pgoff_t start, pgoff_t end) pgoff_t start, pgoff_t end)
{ {
struct ccc_io *cio = ccc_env_io(env); struct vvp_io *cio = vvp_env_io(env);
struct cl_lock_descr *descr = &cio->cui_link.cill_descr; struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
struct cl_object *obj = io->ci_obj; struct cl_object *obj = io->ci_obj;
...@@ -266,8 +235,8 @@ int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io, ...@@ -266,8 +235,8 @@ int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
return 0; return 0;
} }
void ccc_io_update_iov(const struct lu_env *env, void vvp_io_update_iov(const struct lu_env *env,
struct ccc_io *cio, struct cl_io *io) struct vvp_io *cio, struct cl_io *io)
{ {
size_t size = io->u.ci_rw.crw_count; size_t size = io->u.ci_rw.crw_count;
...@@ -277,27 +246,27 @@ void ccc_io_update_iov(const struct lu_env *env, ...@@ -277,27 +246,27 @@ void ccc_io_update_iov(const struct lu_env *env,
iov_iter_truncate(cio->cui_iter, size); iov_iter_truncate(cio->cui_iter, size);
} }
int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io, int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
__u32 enqflags, enum cl_lock_mode mode, __u32 enqflags, enum cl_lock_mode mode,
loff_t start, loff_t end) loff_t start, loff_t end)
{ {
struct cl_object *obj = io->ci_obj; struct cl_object *obj = io->ci_obj;
return ccc_io_one_lock_index(env, io, enqflags, mode, return vvp_io_one_lock_index(env, io, enqflags, mode,
cl_index(obj, start), cl_index(obj, end)); cl_index(obj, start), cl_index(obj, end));
} }
void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios) void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{ {
CLOBINVRNT(env, ios->cis_io->ci_obj, CLOBINVRNT(env, ios->cis_io->ci_obj,
vvp_object_invariant(ios->cis_io->ci_obj)); vvp_object_invariant(ios->cis_io->ci_obj));
} }
void ccc_io_advance(const struct lu_env *env, void vvp_io_advance(const struct lu_env *env,
const struct cl_io_slice *ios, const struct cl_io_slice *ios,
size_t nob) size_t nob)
{ {
struct ccc_io *cio = cl2ccc_io(env, ios); struct vvp_io *cio = cl2vvp_io(env, ios);
struct cl_io *io = ios->cis_io; struct cl_io *io = ios->cis_io;
struct cl_object *obj = ios->cis_io->ci_obj; struct cl_object *obj = ios->cis_io->ci_obj;
...@@ -492,7 +461,7 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr) ...@@ -492,7 +461,7 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
again: again:
if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) { if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
struct ccc_io *cio = ccc_env_io(env); struct vvp_io *cio = vvp_env_io(env);
if (attr->ia_valid & ATTR_FILE) if (attr->ia_valid & ATTR_FILE)
/* populate the file descriptor for ftruncate to honor /* populate the file descriptor for ftruncate to honor
...@@ -524,13 +493,14 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr) ...@@ -524,13 +493,14 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
* *
*/ */
struct ccc_io *cl2ccc_io(const struct lu_env *env, struct vvp_io *cl2vvp_io(const struct lu_env *env,
const struct cl_io_slice *slice) const struct cl_io_slice *slice)
{ {
struct ccc_io *cio; struct vvp_io *cio;
cio = container_of(slice, struct vvp_io, cui_cl);
LASSERT(cio == vvp_env_io(env));
cio = container_of(slice, struct ccc_io, cui_cl);
LASSERT(cio == ccc_env_io(env));
return cio; return cio;
} }
......
...@@ -817,59 +817,6 @@ struct ll_close_queue { ...@@ -817,59 +817,6 @@ struct ll_close_queue {
void vvp_write_pending(struct vvp_object *club, struct vvp_page *page); void vvp_write_pending(struct vvp_object *club, struct vvp_page *page);
void vvp_write_complete(struct vvp_object *club, struct vvp_page *page); void vvp_write_complete(struct vvp_object *club, struct vvp_page *page);
/* specific architecture can implement only part of this list */
enum vvp_io_subtype {
/** normal IO */
IO_NORMAL,
/** io started from splice_{read|write} */
IO_SPLICE
};
/* IO subtypes */
struct vvp_io {
/** io subtype */
enum vvp_io_subtype cui_io_subtype;
union {
struct {
struct pipe_inode_info *cui_pipe;
unsigned int cui_flags;
} splice;
struct vvp_fault_io {
/**
* Inode modification time that is checked across DLM
* lock request.
*/
time64_t ft_mtime;
struct vm_area_struct *ft_vma;
/**
* locked page returned from vvp_io
*/
struct page *ft_vmpage;
struct vm_fault_api {
/**
* kernel fault info
*/
struct vm_fault *ft_vmf;
/**
* fault API used bitflags for return code.
*/
unsigned int ft_flags;
/**
* check that flags are from filemap_fault
*/
bool ft_flags_valid;
} fault;
} fault;
} u;
/* Readahead state. */
pgoff_t cui_ra_start;
pgoff_t cui_ra_count;
/* Set when cui_ra_{start,count} have been initialized. */
bool cui_ra_valid;
};
/** /**
* IO arguments for various VFS I/O interfaces. * IO arguments for various VFS I/O interfaces.
*/ */
...@@ -923,25 +870,6 @@ static inline struct vvp_io_args *vvp_env_args(const struct lu_env *env, ...@@ -923,25 +870,6 @@ static inline struct vvp_io_args *vvp_env_args(const struct lu_env *env,
return ret; return ret;
} }
struct vvp_session {
struct vvp_io vs_ios;
};
static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
{
extern struct lu_context_key vvp_session_key;
struct vvp_session *ses;
ses = lu_context_key_get(env->le_ses, &vvp_session_key);
LASSERT(ses);
return ses;
}
static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
{
return &vvp_env_session(env)->vs_ios;
}
int vvp_global_init(void); int vvp_global_init(void);
void vvp_global_fini(void); void vvp_global_fini(void);
......
...@@ -146,7 +146,7 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret, ...@@ -146,7 +146,7 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj); rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
if (rc == 0) { if (rc == 0) {
struct ccc_io *cio = ccc_env_io(env); struct vvp_io *cio = vvp_env_io(env);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file); struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
LASSERT(cio->cui_cl.cis_io == io); LASSERT(cio->cui_cl.cis_io == io);
...@@ -307,17 +307,17 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -307,17 +307,17 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
vio = vvp_env_io(env); vio = vvp_env_io(env);
vio->u.fault.ft_vma = vma; vio->u.fault.ft_vma = vma;
vio->u.fault.ft_vmpage = NULL; vio->u.fault.ft_vmpage = NULL;
vio->u.fault.fault.ft_vmf = vmf; vio->u.fault.ft_vmf = vmf;
vio->u.fault.fault.ft_flags = 0; vio->u.fault.ft_flags = 0;
vio->u.fault.fault.ft_flags_valid = false; vio->u.fault.ft_flags_valid = false;
result = cl_io_loop(env, io); result = cl_io_loop(env, io);
/* ft_flags are only valid if we reached /* ft_flags are only valid if we reached
* the call to filemap_fault * the call to filemap_fault
*/ */
if (vio->u.fault.fault.ft_flags_valid) if (vio->u.fault.ft_flags_valid)
fault_ret = vio->u.fault.fault.ft_flags; fault_ret = vio->u.fault.ft_flags;
vmpage = vio->u.fault.ft_vmpage; vmpage = vio->u.fault.ft_vmpage;
if (result != 0 && vmpage) { if (result != 0 && vmpage) {
......
...@@ -90,7 +90,7 @@ struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage) ...@@ -90,7 +90,7 @@ struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
struct lu_env *env; struct lu_env *env;
struct cl_io *io; struct cl_io *io;
struct cl_object *clob; struct cl_object *clob;
struct ccc_io *cio; struct vvp_io *cio;
int refcheck; int refcheck;
int result = 0; int result = 0;
...@@ -108,7 +108,7 @@ struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage) ...@@ -108,7 +108,7 @@ struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
lcc->lcc_refcheck = refcheck; lcc->lcc_refcheck = refcheck;
lcc->lcc_cookie = current; lcc->lcc_cookie = current;
cio = ccc_env_io(env); cio = vvp_env_io(env);
io = cio->cui_cl.cis_io; io = cio->cui_cl.cis_io;
lcc->lcc_io = io; lcc->lcc_io = io;
if (!io) { if (!io) {
......
...@@ -376,7 +376,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, ...@@ -376,7 +376,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
env = cl_env_get(&refcheck); env = cl_env_get(&refcheck);
LASSERT(!IS_ERR(env)); LASSERT(!IS_ERR(env));
io = ccc_env_io(env)->cui_cl.cis_io; io = vvp_env_io(env)->cui_cl.cis_io;
LASSERT(io); LASSERT(io);
/* 0. Need locking between buffered and direct access. and race with /* 0. Need locking between buffered and direct access. and race with
...@@ -439,7 +439,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, ...@@ -439,7 +439,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
inode_unlock(inode); inode_unlock(inode);
if (tot_bytes > 0) { if (tot_bytes > 0) {
struct ccc_io *cio = ccc_env_io(env); struct vvp_io *cio = vvp_env_io(env);
/* no commit async for direct IO */ /* no commit async for direct IO */
cio->u.write.cui_written += tot_bytes; cio->u.write.cui_written += tot_bytes;
...@@ -513,7 +513,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping, ...@@ -513,7 +513,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
/* To avoid deadlock, try to lock page first. */ /* To avoid deadlock, try to lock page first. */
vmpage = grab_cache_page_nowait(mapping, index); vmpage = grab_cache_page_nowait(mapping, index);
if (unlikely(!vmpage || PageDirty(vmpage) || PageWriteback(vmpage))) { if (unlikely(!vmpage || PageDirty(vmpage) || PageWriteback(vmpage))) {
struct ccc_io *cio = ccc_env_io(env); struct vvp_io *cio = vvp_env_io(env);
struct cl_page_list *plist = &cio->u.write.cui_queue; struct cl_page_list *plist = &cio->u.write.cui_queue;
/* if the page is already in dirty cache, we have to commit /* if the page is already in dirty cache, we have to commit
...@@ -595,7 +595,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping, ...@@ -595,7 +595,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
struct ll_cl_context *lcc = fsdata; struct ll_cl_context *lcc = fsdata;
struct lu_env *env; struct lu_env *env;
struct cl_io *io; struct cl_io *io;
struct ccc_io *cio; struct vvp_io *cio;
struct cl_page *page; struct cl_page *page;
unsigned from = pos & (PAGE_CACHE_SIZE - 1); unsigned from = pos & (PAGE_CACHE_SIZE - 1);
bool unplug = false; bool unplug = false;
...@@ -606,7 +606,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping, ...@@ -606,7 +606,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
env = lcc->lcc_env; env = lcc->lcc_env;
page = lcc->lcc_page; page = lcc->lcc_page;
io = lcc->lcc_io; io = lcc->lcc_io;
cio = ccc_env_io(env); cio = vvp_env_io(env);
LASSERT(cl_page_is_owned(page, io)); LASSERT(cl_page_is_owned(page, io));
if (copied > 0) { if (copied > 0) {
......
...@@ -138,7 +138,7 @@ struct lu_context_key vvp_session_key = { ...@@ -138,7 +138,7 @@ struct lu_context_key vvp_session_key = {
}; };
/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */ /* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
LU_TYPE_INIT_FINI(vvp, &ccc_key, &ccc_session_key, &vvp_key, &vvp_session_key); LU_TYPE_INIT_FINI(vvp, &ccc_key, &vvp_key, &vvp_session_key);
static const struct lu_device_operations vvp_lu_ops = { static const struct lu_device_operations vvp_lu_ops = {
.ldo_object_alloc = vvp_object_alloc .ldo_object_alloc = vvp_object_alloc
......
...@@ -81,10 +81,18 @@ enum ccc_setattr_lock_type { ...@@ -81,10 +81,18 @@ enum ccc_setattr_lock_type {
SETATTR_MATCH_LOCK SETATTR_MATCH_LOCK
}; };
/* specific architecture can implement only part of this list */
enum vvp_io_subtype {
/** normal IO */
IO_NORMAL,
/** io started from splice_{read|write} */
IO_SPLICE
};
/** /**
 * IO state private to vvp or slp layers. * IO state private to VVP layer.
*/ */
struct ccc_io { struct vvp_io {
/** super class */ /** super class */
struct cl_io_slice cui_cl; struct cl_io_slice cui_cl;
struct cl_io_lock_link cui_link; struct cl_io_lock_link cui_link;
...@@ -98,9 +106,37 @@ struct ccc_io { ...@@ -98,9 +106,37 @@ struct ccc_io {
size_t cui_tot_count; size_t cui_tot_count;
union { union {
struct vvp_fault_io {
/**
* Inode modification time that is checked across DLM
* lock request.
*/
time64_t ft_mtime;
struct vm_area_struct *ft_vma;
/**
* locked page returned from vvp_io
*/
struct page *ft_vmpage;
/**
* kernel fault info
*/
struct vm_fault *ft_vmf;
/**
* fault API used bitflags for return code.
*/
unsigned int ft_flags;
/**
* check that flags are from filemap_fault
*/
bool ft_flags_valid;
} fault;
struct { struct {
enum ccc_setattr_lock_type cui_local_lock; enum ccc_setattr_lock_type cui_local_lock;
} setattr; } setattr;
struct {
struct pipe_inode_info *cui_pipe;
unsigned int cui_flags;
} splice;
struct { struct {
struct cl_page_list cui_queue; struct cl_page_list cui_queue;
unsigned long cui_written; unsigned long cui_written;
...@@ -108,6 +144,9 @@ struct ccc_io { ...@@ -108,6 +144,9 @@ struct ccc_io {
int cui_to; int cui_to;
} write; } write;
} u; } u;
enum vvp_io_subtype cui_io_subtype;
/** /**
* Layout version when this IO is initialized * Layout version when this IO is initialized
*/ */
...@@ -117,6 +156,12 @@ struct ccc_io { ...@@ -117,6 +156,12 @@ struct ccc_io {
*/ */
struct ll_file_data *cui_fd; struct ll_file_data *cui_fd;
struct kiocb *cui_iocb; struct kiocb *cui_iocb;
/* Readahead state. */
pgoff_t cui_ra_start;
pgoff_t cui_ra_count;
/* Set when cui_ra_{start,count} have been initialized. */
bool cui_ra_valid;
}; };
/** /**
...@@ -126,7 +171,7 @@ struct ccc_io { ...@@ -126,7 +171,7 @@ struct ccc_io {
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io); int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
extern struct lu_context_key ccc_key; extern struct lu_context_key ccc_key;
extern struct lu_context_key ccc_session_key; extern struct lu_context_key vvp_session_key;
extern struct kmem_cache *vvp_lock_kmem; extern struct kmem_cache *vvp_lock_kmem;
extern struct kmem_cache *vvp_object_kmem; extern struct kmem_cache *vvp_object_kmem;
...@@ -174,23 +219,23 @@ static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env) ...@@ -174,23 +219,23 @@ static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
return io; return io;
} }
struct ccc_session { struct vvp_session {
struct ccc_io cs_ios; struct vvp_io cs_ios;
}; };
static inline struct ccc_session *ccc_env_session(const struct lu_env *env) static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
{ {
struct ccc_session *ses; struct vvp_session *ses;
ses = lu_context_key_get(env->le_ses, &ccc_session_key); ses = lu_context_key_get(env->le_ses, &vvp_session_key);
LASSERT(ses); LASSERT(ses);
return ses; return ses;
} }
static inline struct ccc_io *ccc_env_io(const struct lu_env *env) static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
{ {
return &ccc_env_session(env)->cs_ios; return &vvp_env_session(env)->cs_ios;
} }
/** /**
...@@ -282,10 +327,6 @@ void *ccc_key_init(const struct lu_context *ctx, ...@@ -282,10 +327,6 @@ void *ccc_key_init(const struct lu_context *ctx,
struct lu_context_key *key); struct lu_context_key *key);
void ccc_key_fini(const struct lu_context *ctx, void ccc_key_fini(const struct lu_context *ctx,
struct lu_context_key *key, void *data); struct lu_context_key *key, void *data);
void *ccc_session_key_init(const struct lu_context *ctx,
struct lu_context_key *key);
void ccc_session_key_fini(const struct lu_context *ctx,
struct lu_context_key *key, void *data);
int ccc_req_init(const struct lu_env *env, struct cl_device *dev, int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
struct cl_req *req); struct cl_req *req);
...@@ -293,16 +334,16 @@ void ccc_umount(const struct lu_env *env, struct cl_device *dev); ...@@ -293,16 +334,16 @@ void ccc_umount(const struct lu_env *env, struct cl_device *dev);
int ccc_global_init(struct lu_device_type *device_type); int ccc_global_init(struct lu_device_type *device_type);
void ccc_global_fini(struct lu_device_type *device_type); void ccc_global_fini(struct lu_device_type *device_type);
int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io, int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
__u32 enqflags, enum cl_lock_mode mode, __u32 enqflags, enum cl_lock_mode mode,
pgoff_t start, pgoff_t end); pgoff_t start, pgoff_t end);
int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io, int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
__u32 enqflags, enum cl_lock_mode mode, __u32 enqflags, enum cl_lock_mode mode,
loff_t start, loff_t end); loff_t start, loff_t end);
void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios); void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios, void vvp_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
size_t nob); size_t nob);
void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio, void vvp_io_update_iov(const struct lu_env *env, struct vvp_io *cio,
struct cl_io *io); struct cl_io *io);
int ccc_prep_size(const struct lu_env *env, struct cl_object *obj, int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io, loff_t start, size_t count, int *exceed); struct cl_io *io, loff_t start, size_t count, int *exceed);
...@@ -356,7 +397,7 @@ static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice) ...@@ -356,7 +397,7 @@ static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
return container_of(slice, struct vvp_lock, vlk_cl); return container_of(slice, struct vvp_lock, vlk_cl);
} }
struct ccc_io *cl2ccc_io(const struct lu_env *env, struct vvp_io *cl2vvp_io(const struct lu_env *env,
const struct cl_io_slice *slice); const struct cl_io_slice *slice);
struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice); struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice);
......
...@@ -372,7 +372,7 @@ static int vvp_page_is_under_lock(const struct lu_env *env, ...@@ -372,7 +372,7 @@ static int vvp_page_is_under_lock(const struct lu_env *env,
{ {
if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE || if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
io->ci_type == CIT_FAULT) { io->ci_type == CIT_FAULT) {
struct ccc_io *cio = ccc_env_io(env); struct vvp_io *cio = vvp_env_io(env);
if (unlikely(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) if (unlikely(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
*max_index = CL_PAGE_EOF; *max_index = CL_PAGE_EOF;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment