Commit da82f7e7 authored by Jeff Layton

cifs: convert cifs_iovec_write to use async writes

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Reviewed-by: Pavel Shilovsky <piastry@etersoft.ru>
parent 597b027f
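
For illustration, the conversion below follows a dispatch-then-collect pattern: each wsize-sized chunk of the user's iovec is copied into a cifs_writedata, sent with cifs_async_writev() (retrying on -EAGAIN), and queued on wdata_list; the caller then waits on each wdata->done completion in order of increasing offset, summing wdata->bytes and stopping at the first error. The user-space C sketch below is not part of the commit; the names (struct write_job, do_write, CHUNK_SIZE) are hypothetical, pthreads stand in for cifs_async_writev() and the wdata->done completions, and the -EAGAIN resend path is omitted.

/*
 * Illustrative sketch only -- user-space C, not kernel code, and not part
 * of the commit. Names here are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

#define CHUNK_SIZE 16	/* stand-in for the per-call wsize limit */

struct write_job {
	pthread_t thread;
	off_t offset;
	size_t bytes;
	int result;			/* 0 on success */
	struct write_job *next;
};

/* worker: pretend to write one chunk asynchronously */
static void *do_write(void *arg)
{
	struct write_job *job = arg;

	/* a real implementation would issue the I/O here */
	printf("wrote %zu bytes at offset %lld\n", job->bytes,
	       (long long)job->offset);
	job->result = 0;
	return NULL;
}

int main(void)
{
	const char data[] = "some data to be written in several async chunks";
	size_t len = strlen(data);
	off_t offset = 0;
	ssize_t total_written = 0;
	struct write_job *head = NULL, **tail = &head, *job, *next;
	int rc = 0;

	/* dispatch phase: fire off one job per chunk and queue it */
	while (len > 0) {
		size_t cur_len = len < CHUNK_SIZE ? len : CHUNK_SIZE;

		job = calloc(1, sizeof(*job));
		if (!job) {
			rc = -1;
			break;
		}
		job->offset = offset;
		job->bytes = cur_len;
		if (pthread_create(&job->thread, NULL, do_write, job)) {
			free(job);
			rc = -1;
			break;
		}
		*tail = job;
		tail = &job->next;
		offset += cur_len;
		len -= cur_len;
	}

	/* if at least one job was dispatched, ignore a later dispatch error */
	if (head)
		rc = 0;

	/* collect phase: reap replies in order of increasing offset */
	for (job = head; job; job = next) {
		next = job->next;
		pthread_join(job->thread, NULL);	/* always reap the worker */
		if (!rc) {
			if (job->result)
				rc = job->result;	/* stop counting at the first error */
			else
				total_written += job->bytes;
		}
		free(job);
	}

	/* like the patch: report bytes written if any, otherwise the error */
	printf("total_written=%zd rc=%d\n", total_written, rc);
	return rc ? 1 : 0;
}
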
@@ -483,6 +483,8 @@ int cifs_async_readv(struct cifs_readdata *rdata);
 /* asynchronous write support */
 struct cifs_writedata {
 	struct kref			refcount;
+	struct list_head		list;
+	struct completion		done;
 	enum writeback_sync_modes	sync_mode;
 	struct work_struct		work;
 	struct cifsFileInfo		*cfile;
@@ -2081,8 +2081,10 @@ cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
 	wdata = kzalloc(sizeof(*wdata) +
 			sizeof(struct page *) * (nr_pages - 1), GFP_NOFS);
 	if (wdata != NULL) {
-		INIT_WORK(&wdata->work, complete);
 		kref_init(&wdata->refcount);
+		INIT_LIST_HEAD(&wdata->list);
+		init_completion(&wdata->done);
+		INIT_WORK(&wdata->work, complete);
 	}
 	return wdata;
 }
@@ -2106,24 +2106,79 @@ size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
 	return num_pages;
 }
 
+static void
+cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
+{
+	int i;
+	size_t bytes = wdata->bytes;
+
+	/* marshal up the pages into iov array */
+	for (i = 0; i < wdata->nr_pages; i++) {
+		iov[i + 1].iov_len = min(bytes, PAGE_SIZE);
+		iov[i + 1].iov_base = kmap(wdata->pages[i]);
+		bytes -= iov[i + 1].iov_len;
+	}
+}
+
+static void
+cifs_uncached_writev_complete(struct work_struct *work)
+{
+	int i;
+	struct cifs_writedata *wdata = container_of(work,
+					struct cifs_writedata, work);
+	struct inode *inode = wdata->cfile->dentry->d_inode;
+	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+
+	spin_lock(&inode->i_lock);
+	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
+	if (cifsi->server_eof > inode->i_size)
+		i_size_write(inode, cifsi->server_eof);
+	spin_unlock(&inode->i_lock);
+
+	complete(&wdata->done);
+
+	if (wdata->result != -EAGAIN) {
+		for (i = 0; i < wdata->nr_pages; i++)
+			put_page(wdata->pages[i]);
+	}
+
+	kref_put(&wdata->refcount, cifs_writedata_release);
+}
+
+/* attempt to send write to server, retry on any -EAGAIN errors */
+static int
+cifs_uncached_retry_writev(struct cifs_writedata *wdata)
+{
+	int rc;
+
+	do {
+		if (wdata->cfile->invalidHandle) {
+			rc = cifs_reopen_file(wdata->cfile, false);
+			if (rc != 0)
+				continue;
+		}
+		rc = cifs_async_writev(wdata);
+	} while (rc == -EAGAIN);
+
+	return rc;
+}
+
 static ssize_t
 cifs_iovec_write(struct file *file, const struct iovec *iov,
 		 unsigned long nr_segs, loff_t *poffset)
 {
-	unsigned int written;
-	unsigned long num_pages, npages, i;
+	unsigned long nr_pages, i;
 	size_t copied, len, cur_len;
 	ssize_t total_written = 0;
-	struct kvec *to_send;
-	struct page **pages;
+	loff_t offset = *poffset;
 	struct iov_iter it;
-	struct inode *inode;
 	struct cifsFileInfo *open_file;
-	struct cifs_tcon *pTcon;
+	struct cifs_tcon *tcon;
 	struct cifs_sb_info *cifs_sb;
-	struct cifs_io_parms io_parms;
-	int xid, rc;
-	__u32 pid;
+	struct cifs_writedata *wdata, *tmp;
+	struct list_head wdata_list;
+	int rc;
+	pid_t pid;
 
 	len = iov_length(iov, nr_segs);
 	if (!len)
@@ -2133,105 +2188,103 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
 	if (rc)
 		return rc;
 
+	INIT_LIST_HEAD(&wdata_list);
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
-	num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
-
-	pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
-	if (!pages)
-		return -ENOMEM;
-
-	to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
-	if (!to_send) {
-		kfree(pages);
-		return -ENOMEM;
-	}
-
-	rc = cifs_write_allocate_pages(pages, num_pages);
-	if (rc) {
-		kfree(pages);
-		kfree(to_send);
-		return rc;
-	}
-
-	xid = GetXid();
 	open_file = file->private_data;
+	tcon = tlink_tcon(open_file->tlink);
 
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
 		pid = open_file->pid;
 	else
 		pid = current->tgid;
 
-	pTcon = tlink_tcon(open_file->tlink);
-	inode = file->f_path.dentry->d_inode;
-
 	iov_iter_init(&it, iov, nr_segs, len, 0);
-	npages = num_pages;
-
 	do {
-		size_t save_len = cur_len;
-		for (i = 0; i < npages; i++) {
-			copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
-			copied = iov_iter_copy_from_user(pages[i], &it, 0,
-							 copied);
+		size_t save_len;
+
+		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
+		wdata = cifs_writedata_alloc(nr_pages,
+					     cifs_uncached_writev_complete);
+		if (!wdata) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
+		if (rc) {
+			kfree(wdata);
+			break;
+		}
+
+		save_len = cur_len;
+		for (i = 0; i < nr_pages; i++) {
+			copied = min_t(const size_t, cur_len, PAGE_SIZE);
+			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
 			cur_len -= copied;
 			iov_iter_advance(&it, copied);
-			to_send[i+1].iov_base = kmap(pages[i]);
-			to_send[i+1].iov_len = copied;
 		}
-
 		cur_len = save_len - cur_len;
 
-		do {
-			if (open_file->invalidHandle) {
-				rc = cifs_reopen_file(open_file, false);
-				if (rc != 0)
-					break;
-			}
-			io_parms.netfid = open_file->netfid;
-			io_parms.pid = pid;
-			io_parms.tcon = pTcon;
-			io_parms.offset = *poffset;
-			io_parms.length = cur_len;
-			rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
-					   npages, 0);
-		} while (rc == -EAGAIN);
-
-		for (i = 0; i < npages; i++)
-			kunmap(pages[i]);
-
-		if (written) {
-			len -= written;
-			total_written += written;
-			spin_lock(&inode->i_lock);
-			cifs_update_eof(CIFS_I(inode), *poffset, written);
-			spin_unlock(&inode->i_lock);
-			*poffset += written;
-		} else if (rc < 0) {
-			if (!total_written)
-				total_written = rc;
+		wdata->sync_mode = WB_SYNC_ALL;
+		wdata->nr_pages = nr_pages;
+		wdata->offset = (__u64)offset;
+		wdata->cfile = cifsFileInfo_get(open_file);
+		wdata->pid = pid;
+		wdata->bytes = cur_len;
+		wdata->marshal_iov = cifs_uncached_marshal_iov;
+		rc = cifs_uncached_retry_writev(wdata);
+		if (rc) {
+			kref_put(&wdata->refcount, cifs_writedata_release);
 			break;
 		}
 
-		/* get length and number of kvecs of the next write */
-		npages = get_numpages(cifs_sb->wsize, len, &cur_len);
+		list_add_tail(&wdata->list, &wdata_list);
+		offset += cur_len;
+		len -= cur_len;
 	} while (len > 0);
 
-	if (total_written > 0) {
-		spin_lock(&inode->i_lock);
-		if (*poffset > inode->i_size)
-			i_size_write(inode, *poffset);
-		spin_unlock(&inode->i_lock);
+	/*
+	 * If at least one write was successfully sent, then discard any rc
+	 * value from the later writes. If the other write succeeds, then
+	 * we'll end up returning whatever was written. If it fails, then
+	 * we'll get a new rc value from that.
+	 */
+	if (!list_empty(&wdata_list))
+		rc = 0;
+
+	/*
+	 * Wait for and collect replies for any successful sends in order of
+	 * increasing offset. Once an error is hit or we get a fatal signal
+	 * while waiting, then return without waiting for any more replies.
+	 */
+restart_loop:
+	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
+		if (!rc) {
+			/* FIXME: freezable too? */
+			rc = wait_for_completion_killable(&wdata->done);
+			if (rc)
+				rc = -EINTR;
+			else if (wdata->result)
+				rc = wdata->result;
+			else
+				total_written += wdata->bytes;
+
+			/* resend call if it's a retryable error */
+			if (rc == -EAGAIN) {
+				rc = cifs_uncached_retry_writev(wdata);
+				goto restart_loop;
+			}
+		}
+		list_del_init(&wdata->list);
+		kref_put(&wdata->refcount, cifs_writedata_release);
 	}
 
-	cifs_stats_bytes_written(pTcon, total_written);
-	mark_inode_dirty_sync(inode);
+	if (total_written > 0)
+		*poffset += total_written;
 
-	for (i = 0; i < num_pages; i++)
-		put_page(pages[i]);
-	kfree(to_send);
-	kfree(pages);
-	FreeXid(xid);
-	return total_written;
+	cifs_stats_bytes_written(tcon, total_written);
+	return total_written ? total_written : (ssize_t)rc;
 }
 
 ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,