Commit e0a8144b authored by John L. Hammond, committed by Greg Kroah-Hartman

staging/lustre/llite: use vui prefix for struct vvp_io members

Rename the members of struct vvp_io to start with vui_ rather than
cui_.  Rename several struct vvp_io * instances from cio to vio.
Signed-off-by: John L. Hammond <john.hammond@intel.com>
Reviewed-on: http://review.whamcloud.com/13363
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5971
Reviewed-by: Bobi Jam <bobijam@hotmail.com>
Reviewed-by: Lai Siyao <lai.siyao@intel.com>
Reviewed-by: Jinshan Xiong <jinshan.xiong@intel.com>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 10cdef73
@@ -1135,18 +1135,18 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
 	ll_io_init(io, file, iot == CIT_WRITE);

 	if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
-		struct vvp_io *cio = vvp_env_io(env);
+		struct vvp_io *vio = vvp_env_io(env);
 		int write_mutex_locked = 0;

-		cio->cui_fd = LUSTRE_FPRIVATE(file);
-		cio->cui_io_subtype = args->via_io_subtype;
-		switch (cio->cui_io_subtype) {
+		vio->vui_fd = LUSTRE_FPRIVATE(file);
+		vio->vui_io_subtype = args->via_io_subtype;
+		switch (vio->vui_io_subtype) {
 		case IO_NORMAL:
-			cio->cui_iter = args->u.normal.via_iter;
-			cio->cui_iocb = args->u.normal.via_iocb;
+			vio->vui_iter = args->u.normal.via_iter;
+			vio->vui_iocb = args->u.normal.via_iocb;
 			if ((iot == CIT_WRITE) &&
-			    !(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+			    !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
 				if (mutex_lock_interruptible(&lli->
 							     lli_write_mutex)) {
 					result = -ERESTARTSYS;
@@ -1157,11 +1157,11 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
 			down_read(&lli->lli_trunc_sem);
 			break;
 		case IO_SPLICE:
-			cio->u.splice.cui_pipe = args->u.splice.via_pipe;
-			cio->u.splice.cui_flags = args->u.splice.via_flags;
+			vio->u.splice.vui_pipe = args->u.splice.via_pipe;
+			vio->u.splice.vui_flags = args->u.splice.via_flags;
 			break;
 		default:
-			CERROR("Unknown IO type - %u\n", cio->cui_io_subtype);
+			CERROR("Unknown IO type - %u\n", vio->vui_io_subtype);
 			LBUG();
 		}

 		result = cl_io_loop(env, io);
...
@@ -210,19 +210,19 @@ int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
 			  __u32 enqflags, enum cl_lock_mode mode,
 			  pgoff_t start, pgoff_t end)
 {
-	struct vvp_io *cio = vvp_env_io(env);
-	struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
+	struct vvp_io *vio = vvp_env_io(env);
+	struct cl_lock_descr *descr = &vio->vui_link.cill_descr;
 	struct cl_object *obj = io->ci_obj;

 	CLOBINVRNT(env, obj, vvp_object_invariant(obj));
 	CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);

-	memset(&cio->cui_link, 0, sizeof(cio->cui_link));
+	memset(&vio->vui_link, 0, sizeof(vio->vui_link));

-	if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+	if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
 		descr->cld_mode = CLM_GROUP;
-		descr->cld_gid = cio->cui_fd->fd_grouplock.cg_gid;
+		descr->cld_gid = vio->vui_fd->fd_grouplock.cg_gid;
 	} else {
 		descr->cld_mode = mode;
 	}
@@ -231,19 +231,19 @@ int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
 	descr->cld_end = end;
 	descr->cld_enq_flags = enqflags;

-	cl_io_lock_add(env, io, &cio->cui_link);
+	cl_io_lock_add(env, io, &vio->vui_link);
 	return 0;
 }

 void vvp_io_update_iov(const struct lu_env *env,
-		       struct vvp_io *cio, struct cl_io *io)
+		       struct vvp_io *vio, struct cl_io *io)
 {
 	size_t size = io->u.ci_rw.crw_count;

-	if (!cl_is_normalio(env, io) || !cio->cui_iter)
+	if (!cl_is_normalio(env, io) || !vio->vui_iter)
 		return;

-	iov_iter_truncate(cio->cui_iter, size);
+	iov_iter_truncate(vio->vui_iter, size);
 }

 int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
@@ -266,7 +266,7 @@ void vvp_io_advance(const struct lu_env *env,
 		    const struct cl_io_slice *ios,
 		    size_t nob)
 {
-	struct vvp_io *cio = cl2vvp_io(env, ios);
+	struct vvp_io *vio = cl2vvp_io(env, ios);
 	struct cl_io *io = ios->cis_io;
 	struct cl_object *obj = ios->cis_io->ci_obj;
@@ -275,7 +275,7 @@ void vvp_io_advance(const struct lu_env *env,
 	if (!cl_is_normalio(env, io))
 		return;

-	iov_iter_reexpand(cio->cui_iter, cio->cui_tot_count -= nob);
+	iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count -= nob);
 }

 /**
@@ -461,13 +461,13 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
 again:
 	if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
-		struct vvp_io *cio = vvp_env_io(env);
+		struct vvp_io *vio = vvp_env_io(env);

 		if (attr->ia_valid & ATTR_FILE)
 			/* populate the file descriptor for ftruncate to honor
 			 * group lock - see LU-787
 			 */
-			cio->cui_fd = LUSTRE_FPRIVATE(attr->ia_file);
+			vio->vui_fd = LUSTRE_FPRIVATE(attr->ia_file);

 		result = cl_io_loop(env, io);
 	} else {
@@ -496,12 +496,12 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
 struct vvp_io *cl2vvp_io(const struct lu_env *env,
 			 const struct cl_io_slice *slice)
 {
-	struct vvp_io *cio;
+	struct vvp_io *vio;

-	cio = container_of(slice, struct vvp_io, cui_cl);
-	LASSERT(cio == vvp_env_io(env));
+	vio = container_of(slice, struct vvp_io, vui_cl);
+	LASSERT(vio == vvp_env_io(env));

-	return cio;
+	return vio;
 }

 struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
...
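An aside on the cl2vvp_io() change above: the helper recovers the enclosing struct vvp_io from an embedded cl_io_slice via container_of(), so only the member name changes (cui_cl to vui_cl), not the mechanism. A minimal standalone sketch of that pattern, using generic stand-in names rather than Lustre types:

	/* Standalone sketch of the container_of() pattern used by cl2vvp_io():
	 * recover a pointer to the enclosing struct from a pointer to one of
	 * its embedded members. All names here are illustrative stand-ins.
	 */
	#include <assert.h>
	#include <stddef.h>

	struct slice {
		int kind;
	};

	struct outer {
		int          id;
		struct slice sl;	/* embedded, like vui_cl inside struct vvp_io */
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	int main(void)
	{
		struct outer o = { .id = 42 };
		struct slice *s = &o.sl;
		/* same step as: vio = container_of(slice, struct vvp_io, vui_cl); */
		struct outer *back = container_of(s, struct outer, sl);

		assert(back == &o);
		assert(back->id == 42);
		return 0;
	}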
@@ -146,14 +146,14 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
 	rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
 	if (rc == 0) {
-		struct vvp_io *cio = vvp_env_io(env);
+		struct vvp_io *vio = vvp_env_io(env);
 		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

-		LASSERT(cio->cui_cl.cis_io == io);
+		LASSERT(vio->vui_cl.cis_io == io);

 		/* mmap lock must be MANDATORY it has to cache pages. */
 		io->ci_lockreq = CILR_MANDATORY;
-		cio->cui_fd = fd;
+		vio->vui_fd = fd;
 	} else {
 		LASSERT(rc < 0);
 		cl_io_fini(env, io);
...
@@ -90,7 +90,7 @@ struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
 	struct lu_env *env;
 	struct cl_io *io;
 	struct cl_object *clob;
-	struct vvp_io *cio;
+	struct vvp_io *vio;

 	int refcheck;
 	int result = 0;
@@ -108,8 +108,8 @@ struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
 	lcc->lcc_refcheck = refcheck;
 	lcc->lcc_cookie = current;

-	cio = vvp_env_io(env);
-	io = cio->cui_cl.cis_io;
+	vio = vvp_env_io(env);
+	io = vio->vui_cl.cis_io;
 	lcc->lcc_io = io;
 	if (!io) {
 		struct inode *inode = file_inode(file);
@@ -125,7 +125,7 @@ struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
 		struct cl_page *page;

 		LASSERT(io->ci_state == CIS_IO_GOING);
-		LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file));
+		LASSERT(vio->vui_fd == LUSTRE_FPRIVATE(file));
 		page = cl_page_find(env, clob, vmpage->index, vmpage,
 				    CPT_CACHEABLE);
 		if (!IS_ERR(page)) {
@@ -553,10 +553,10 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
 	spin_lock(&ras->ras_lock);

 	/* Enlarge the RA window to encompass the full read */
-	if (vio->cui_ra_valid &&
+	if (vio->vui_ra_valid &&
 	    ras->ras_window_start + ras->ras_window_len <
-	    vio->cui_ra_start + vio->cui_ra_count) {
-		ras->ras_window_len = vio->cui_ra_start + vio->cui_ra_count -
+	    vio->vui_ra_start + vio->vui_ra_count) {
+		ras->ras_window_len = vio->vui_ra_start + vio->vui_ra_count -
 				      ras->ras_window_start;
 	}
@@ -615,15 +615,15 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
 	CDEBUG(D_READA, DFID ": ria: %lu/%lu, bead: %lu/%lu, hit: %d\n",
 	       PFID(lu_object_fid(&clob->co_lu)),
 	       ria->ria_start, ria->ria_end,
-	       vio->cui_ra_valid ? vio->cui_ra_start : 0,
-	       vio->cui_ra_valid ? vio->cui_ra_count : 0,
+	       vio->vui_ra_valid ? vio->vui_ra_start : 0,
+	       vio->vui_ra_valid ? vio->vui_ra_count : 0,
 	       hit);

 	/* at least to extend the readahead window to cover current read */
-	if (!hit && vio->cui_ra_valid &&
-	    vio->cui_ra_start + vio->cui_ra_count > ria->ria_start) {
+	if (!hit && vio->vui_ra_valid &&
+	    vio->vui_ra_start + vio->vui_ra_count > ria->ria_start) {
 		/* to the end of current read window. */
-		mlen = vio->cui_ra_start + vio->cui_ra_count - ria->ria_start;
+		mlen = vio->vui_ra_start + vio->vui_ra_count - ria->ria_start;
 		/* trim to RPC boundary */
 		start = ria->ria_start & (PTLRPC_MAX_BRW_PAGES - 1);
 		mlen = min(mlen, PTLRPC_MAX_BRW_PAGES - start);
...
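The ll_readahead() hunk above extends the window to vui_ra_start + vui_ra_count and then trims it to an RPC boundary with a bitmask. A small worked sketch of that arithmetic, using a made-up window and assuming PTLRPC_MAX_BRW_PAGES is a power of two (256 here), which the mask trick requires:

	/* Worked sketch of the "trim to RPC boundary" arithmetic in
	 * ll_readahead(). The window values are made up; PTLRPC_MAX_BRW_PAGES
	 * is assumed to be a power of two, since (x & (N - 1)) relies on it.
	 */
	#include <stdio.h>

	#define PTLRPC_MAX_BRW_PAGES 256UL

	int main(void)
	{
		unsigned long vui_ra_start = 100;	/* hypothetical read window */
		unsigned long vui_ra_count = 500;
		unsigned long ria_start    = 300;	/* current readahead position */

		/* extend to the end of the current read window: 300 pages */
		unsigned long mlen = vui_ra_start + vui_ra_count - ria_start;

		/* offset of ria_start inside its RPC-sized chunk: 300 & 255 = 44 */
		unsigned long start = ria_start & (PTLRPC_MAX_BRW_PAGES - 1);

		/* never read past the end of the current RPC chunk: min(300, 212) */
		if (mlen > PTLRPC_MAX_BRW_PAGES - start)
			mlen = PTLRPC_MAX_BRW_PAGES - start;

		printf("mlen = %lu pages\n", mlen);	/* prints 212 */
		return 0;
	}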
@@ -376,7 +376,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
 	env = cl_env_get(&refcheck);
 	LASSERT(!IS_ERR(env));
-	io = vvp_env_io(env)->cui_cl.cis_io;
+	io = vvp_env_io(env)->vui_cl.cis_io;
 	LASSERT(io);

 	/* 0. Need locking between buffered and direct access. and race with
@@ -439,10 +439,10 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
 	inode_unlock(inode);

 	if (tot_bytes > 0) {
-		struct vvp_io *cio = vvp_env_io(env);
+		struct vvp_io *vio = vvp_env_io(env);

 		/* no commit async for direct IO */
-		cio->u.write.cui_written += tot_bytes;
+		vio->u.write.vui_written += tot_bytes;
 	}

 	cl_env_put(env, &refcheck);
@@ -513,8 +513,8 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
 	/* To avoid deadlock, try to lock page first. */
 	vmpage = grab_cache_page_nowait(mapping, index);
 	if (unlikely(!vmpage || PageDirty(vmpage) || PageWriteback(vmpage))) {
-		struct vvp_io *cio = vvp_env_io(env);
-		struct cl_page_list *plist = &cio->u.write.cui_queue;
+		struct vvp_io *vio = vvp_env_io(env);
+		struct cl_page_list *plist = &vio->u.write.vui_queue;

 		/* if the page is already in dirty cache, we have to commit
 		 * the pages right now; otherwise, it may cause deadlock
@@ -595,7 +595,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
 	struct ll_cl_context *lcc = fsdata;
 	struct lu_env *env;
 	struct cl_io *io;
-	struct vvp_io *cio;
+	struct vvp_io *vio;
 	struct cl_page *page;
 	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
 	bool unplug = false;
@@ -606,21 +606,21 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
 	env = lcc->lcc_env;
 	page = lcc->lcc_page;
 	io = lcc->lcc_io;
-	cio = vvp_env_io(env);
+	vio = vvp_env_io(env);

 	LASSERT(cl_page_is_owned(page, io));
 	if (copied > 0) {
-		struct cl_page_list *plist = &cio->u.write.cui_queue;
+		struct cl_page_list *plist = &vio->u.write.vui_queue;

 		lcc->lcc_page = NULL; /* page will be queued */

 		/* Add it into write queue */
 		cl_page_list_add(plist, page);
 		if (plist->pl_nr == 1) /* first page */
-			cio->u.write.cui_from = from;
+			vio->u.write.vui_from = from;
 		else
 			LASSERT(from == 0);
-		cio->u.write.cui_to = from + copied;
+		vio->u.write.vui_to = from + copied;

 		/* We may have one full RPC, commit it soon */
 		if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
...
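For context on the ll_write_end() hunk above: pages accumulate on vui_queue, vui_from and vui_to record the dirty byte range of the first and last queued page, and the queue is committed once a full RPC's worth of pages is gathered. A toy sketch of that batching idea, with invented names and a plain counter standing in for struct cl_page_list (not Lustre API):

	/* Toy sketch of the write-queue batching visible in ll_write_end():
	 * queue pages, track the first page's start offset and the last
	 * page's end offset, flush once a full RPC's worth has accumulated.
	 * Everything below is invented for illustration.
	 */
	#include <stdio.h>

	#define MAX_BRW_PAGES 256	/* stand-in for PTLRPC_MAX_BRW_PAGES */

	struct write_queue {
		int      nr;	/* queued pages, like plist->pl_nr */
		unsigned from;	/* dirty start in first page, like vui_from */
		unsigned to;	/* dirty end in last page, like vui_to */
	};

	static void flush(struct write_queue *q)
	{
		printf("flush %d pages, from=%u to=%u\n", q->nr, q->from, q->to);
		q->nr = 0;
	}

	static void queue_page(struct write_queue *q, unsigned from, unsigned copied)
	{
		q->nr++;
		if (q->nr == 1)		/* first page: remember its start offset */
			q->from = from;
		q->to = from + copied;	/* always track the end of the last page */

		if (q->nr >= MAX_BRW_PAGES)	/* one full RPC: commit it soon */
			flush(q);
	}

	int main(void)
	{
		struct write_queue q = { 0 };
		unsigned i;

		queue_page(&q, 512, 3584);		/* partial first page */
		for (i = 0; i < 300; i++)
			queue_page(&q, 0, 4096);	/* full pages follow */
		if (q.nr > 0)
			flush(&q);			/* final partial batch */
		return 0;
	}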
@@ -94,16 +94,16 @@ enum vvp_io_subtype {
  */
 struct vvp_io {
 	/** super class */
-	struct cl_io_slice     cui_cl;
-	struct cl_io_lock_link cui_link;
+	struct cl_io_slice     vui_cl;
+	struct cl_io_lock_link vui_link;
 	/**
 	 * I/O vector information to or from which read/write is going.
 	 */
-	struct iov_iter *cui_iter;
+	struct iov_iter *vui_iter;
 	/**
 	 * Total size for the left IO.
 	 */
-	size_t cui_tot_count;
+	size_t vui_tot_count;

 	union {
 		struct vvp_fault_io {
@@ -131,37 +131,37 @@ struct vvp_io {
 			bool ft_flags_valid;
 		} fault;
 		struct {
-			enum ccc_setattr_lock_type cui_local_lock;
+			enum ccc_setattr_lock_type vui_local_lock;
 		} setattr;
 		struct {
-			struct pipe_inode_info *cui_pipe;
-			unsigned int		cui_flags;
+			struct pipe_inode_info *vui_pipe;
+			unsigned int		vui_flags;
 		} splice;
 		struct {
-			struct cl_page_list cui_queue;
-			unsigned long	    cui_written;
-			int		    cui_from;
-			int		    cui_to;
+			struct cl_page_list vui_queue;
+			unsigned long	    vui_written;
+			int		    vui_from;
+			int		    vui_to;
 		} write;
 	} u;

-	enum vvp_io_subtype cui_io_subtype;
+	enum vvp_io_subtype vui_io_subtype;

 	/**
 	 * Layout version when this IO is initialized
 	 */
-	__u32 cui_layout_gen;
+	__u32 vui_layout_gen;

 	/**
	 * File descriptor against which IO is done.
 	 */
-	struct ll_file_data *cui_fd;
-	struct kiocb	    *cui_iocb;
+	struct ll_file_data *vui_fd;
+	struct kiocb	    *vui_iocb;

 	/* Readahead state. */
-	pgoff_t cui_ra_start;
-	pgoff_t cui_ra_count;
-	/* Set when cui_ra_{start,count} have been initialized. */
-	bool	cui_ra_valid;
+	pgoff_t vui_ra_start;
+	pgoff_t vui_ra_count;
+	/* Set when vui_ra_{start,count} have been initialized. */
+	bool	vui_ra_valid;
 };

 /**
...
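The struct above is a discriminated union: vui_io_subtype records which member of u is live, and ll_file_io_generic() switches on it before touching u.normal or u.splice. A compact sketch of that idiom, with invented names in place of the Lustre types:

	/* Sketch of the tagged-union idiom used by struct vvp_io: an enum
	 * records which union member is valid. Names are invented stand-ins.
	 */
	#include <stdio.h>

	enum io_subtype { IO_NORMAL, IO_SPLICE };

	struct toy_io {
		enum io_subtype subtype;	/* like vui_io_subtype */
		union {
			struct { int iter; } normal;	/* like vui_iter/vui_iocb */
			struct { int pipe; } splice;	/* like vui_pipe/vui_flags */
		} u;
	};

	static void dispatch(const struct toy_io *io)
	{
		switch (io->subtype) {	/* mirrors the switch in ll_file_io_generic() */
		case IO_NORMAL:
			printf("normal: %d\n", io->u.normal.iter);
			break;
		case IO_SPLICE:
			printf("splice: %d\n", io->u.splice.pipe);
			break;
		}
	}

	int main(void)
	{
		struct toy_io io = { .subtype = IO_SPLICE, .u.splice.pipe = 7 };

		dispatch(&io);
		return 0;
	}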
@@ -372,9 +372,9 @@ static int vvp_page_is_under_lock(const struct lu_env *env,
 {
 	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
 	    io->ci_type == CIT_FAULT) {
-		struct vvp_io *cio = vvp_env_io(env);
+		struct vvp_io *vio = vvp_env_io(env);

-		if (unlikely(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
+		if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
 			*max_index = CL_PAGE_EOF;
 	}
 	return 0;
...