Commit 8a48df70 authored by John L. Hammond, committed by Greg Kroah-Hartman

staging/lustre/llite: handle io init failure in ll_fault_io_init()

In ll_fault_io_init(), if cl_io_init() has failed, clean up and
return an ERR_PTR(). This fixes an oops in the page fault handling
code when a partially initialized io is used. In ll_page_mkwrite0(),
do not call cl_io_fini() on an ERR_PTR().

Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-3487
Lustre-change: http://review.whamcloud.com/6735
Signed-off-by: John L. Hammond <john.hammond@intel.com>
Reviewed-by: Lai Siyao <lai.siyao@intel.com>
Reviewed-by: Jinshan Xiong <jinshan.xiong@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: Andreas Dilger <andreas.dilger@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 78eb9092
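For context, the fix adopts the kernel's standard ERR_PTR()/IS_ERR()/PTR_ERR() convention from <linux/err.h>: if initialization fails partway, undo whatever setup already happened and encode the errno in the returned pointer, so callers never see a half-initialized object. Below is a minimal sketch of that convention, not the patch itself; struct foo, foo_init(), foo_create() and foo_use() are hypothetical names.

#include <linux/err.h>
#include <linux/slab.h>

struct foo {
	int state;
};

/* Hypothetical second-stage init that may fail with a negative errno. */
static int foo_init(struct foo *foo)
{
	foo->state = 1;
	return 0;
}

static struct foo *foo_create(void)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	int rc;

	if (!foo)
		return ERR_PTR(-ENOMEM);

	rc = foo_init(foo);
	if (rc < 0) {
		/* Undo the partial setup before returning, as the patch now
		 * does with cl_io_fini()/cl_env_nested_put(). */
		kfree(foo);
		return ERR_PTR(rc);
	}
	return foo;
}

static int foo_use(void)
{
	struct foo *foo = foo_create();

	/* Callers test with IS_ERR() and must not run the normal
	 * teardown path on an ERR_PTR() value. */
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	kfree(foo);
	return 0;
}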
@@ -106,11 +106,12 @@ struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
 			       struct cl_env_nest *nest,
 			       pgoff_t index, unsigned long *ra_flags)
 {
 	struct file	*file = vma->vm_file;
 	struct inode	*inode = file->f_dentry->d_inode;
 	struct cl_io	*io;
 	struct cl_fault_io *fio;
 	struct lu_env	*env;
+	int		 rc;
 	ENTRY;
 
 	*env_ret = NULL;
@@ -151,17 +152,22 @@ struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
 	CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
 	       fio->ft_index, fio->ft_executable);
 
-	if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
+	rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
+	if (rc == 0) {
 		struct ccc_io *cio = ccc_env_io(env);
 		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
 
 		LASSERT(cio->cui_cl.cis_io == io);
 
-		/* mmap lock must be MANDATORY
-		 * it has to cache pages. */
+		/* mmap lock must be MANDATORY it has to cache
+		 * pages. */
 		io->ci_lockreq = CILR_MANDATORY;
-
-		cio->cui_fd  = fd;
+		cio->cui_fd = fd;
+	} else {
+		LASSERT(rc < 0);
+		cl_io_fini(env, io);
+		cl_env_nested_put(nest, env);
+		io = ERR_PTR(rc);
 	}
 
 	return io;
@@ -189,7 +195,7 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
 
 	result = io->ci_result;
 	if (result < 0)
-		GOTO(out, result);
+		GOTO(out_io, result);
 
 	io->u.ci_fault.ft_mkwrite = 1;
 	io->u.ci_fault.ft_writable = 1;
@@ -251,14 +257,14 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
 	}
 	EXIT;
 
-out:
+out_io:
 	cl_io_fini(env, io);
 	cl_env_nested_put(&nest, env);
-
+out:
 	CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
 
 	LASSERT(ergo(result == 0, PageLocked(vmpage)));
 
-	return(result);
+	return result;
 }
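The ll_page_mkwrite0() hunks apply the matching caller-side discipline: the single cleanup label is split so that cl_io_fini() and cl_env_nested_put() run only on the path where a real io exists, which is how the commit message's "do not call cl_io_fini() on an ERR_PTR()" is enforced. Below is a minimal sketch of that split-label unwind; bar_io_init() and bar_mkwrite() are hypothetical stand-ins, not the real Lustre functions.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct bar_io {
	int ci_result;
};

/* Hypothetical stand-in for ll_fault_io_init(): a fully initialized
 * object on success, ERR_PTR() on failure. */
static struct bar_io *bar_io_init(void)
{
	struct bar_io *io = kzalloc(sizeof(*io), GFP_KERNEL);

	if (!io)
		return ERR_PTR(-ENOMEM);
	return io;
}

static int bar_mkwrite(void)
{
	struct bar_io *io;
	int result;

	io = bar_io_init();
	if (IS_ERR(io)) {
		result = PTR_ERR(io);
		goto out;		/* no io exists, nothing to tear down */
	}

	result = io->ci_result;
	if (result < 0)
		goto out_io;

	/* ... the actual mkwrite work would go here ... */
	result = 0;

out_io:
	kfree(io);		/* teardown only for a real io, never an ERR_PTR() */
out:
	pr_debug("mkwrite with %d\n", result);
	return result;
}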