Commit b7651add authored by Andrew Morton, committed by David Mosberger

[PATCH] Fix generic_file_write() again.

From: "Milton D. Miller II" <miltonm@realtime.net>

The code at present has a small problem: when a fault is encountered we will
run commit_write() to cover the amount of data which was successfully copied
in from userspace.

But filemap_copy_from_user() may have zeroed out some more of the page.  So
pagecache now has zeroes and the buffer_head which represents those zeroes is
not dirtied.  So a subsequent eviction and re-read of the file in the window
beyond the faulting offset will return the file's old contents and not the
zeroes.
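
To make the window concrete, here is an abridged sketch of one pass of the
buffered-write path as it stood before this patch (not verbatim kernel code;
the names match those in the hunks below):

        copied = filemap_copy_from_user(page, offset, buf, bytes);
        flush_dcache_page(page);
        /*
         * After a fault, __copy_from_user() has zero-filled the page from
         * offset+copied out to offset+bytes, but commit_write() is only told
         * about [offset, offset+copied), so the buffer_head backing the
         * zero-filled tail never gets dirtied.
         */
        status = a_ops->commit_write(file, page, offset, offset + copied);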

So we change filemap_copy_from_user_iovec() to have the same behaviour as the
non-iovec filemap_copy_from_user(), and ensure that the commit_write() covers
the parts of the page which copy_from_user() zeroed out.
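
The resulting contract, sketched for clarity (a summary of the diff below, not
additional code from the patch): both copy helpers now write the page all the
way out to offset+bytes when a fault occurs, and commit_write() is told about
that whole range:

        copied = filemap_copy_from_user_iovec(page, offset,
                        cur_iov, iov_base, bytes);
        /*
         * The page now holds user data for the first 'copied' bytes and, if a
         * fault occurred, zeroes from offset+copied out to offset+bytes, so
         * commit_write() must cover the full range that was written.
         */
        status = a_ops->commit_write(file, page, offset, offset + bytes);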
parent 2632cc57
@@ -1408,6 +1408,11 @@ void remove_suid(struct dentry *dentry)
 	}
 }
 
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied.  If a fault is encountered then clear the page
+ * out to (offset+bytes) and return the number of bytes which were copied.
+ */
 static inline size_t
 filemap_copy_from_user(struct page *page, unsigned long offset,
 			const char __user *buf, unsigned bytes)
@@ -1425,30 +1430,42 @@ filemap_copy_from_user(struct page *page, unsigned long offset,
 		left = __copy_from_user(kaddr + offset, buf, bytes);
 		kunmap(page);
 	}
-	return left ? 0 : bytes;
+	return bytes - left;
 }
 
 static size_t
 __filemap_copy_from_user_iovec(char *vaddr,
 			const struct iovec *iov, size_t base, size_t bytes)
 {
-	size_t copied = 0;
+	size_t copied = 0, left = 0;
 
 	while (bytes) {
 		char __user *buf = iov->iov_base + base;
 		int copy = min(bytes, iov->iov_len - base);
 
 		base = 0;
-		if (__copy_from_user(vaddr, buf, copy))
-			break;
+		left = __copy_from_user(vaddr, buf, copy);
 		copied += copy;
 		bytes -= copy;
 		vaddr += copy;
 		iov++;
+
+		if (unlikely(left)) {
+			/* zero the rest of the target like __copy_from_user */
+			if (bytes)
+				memset(vaddr, 0, bytes);
+			break;
+		}
 	}
-	return copied;
+	return copied - left;
 }
 
+/*
+ * This has the same side effects and return value as filemap_copy_from_user().
+ * The difference is that on a fault we need to memset the remainder of the
+ * page (out to offset+bytes), to emulate filemap_copy_from_user()'s
+ * single-segment behaviour.
+ */
 static inline size_t
 filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
 			const struct iovec *iov, size_t base, size_t bytes)
@@ -1716,8 +1733,7 @@ generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
 			copied = filemap_copy_from_user_iovec(page, offset,
 						cur_iov, iov_base, bytes);
 		flush_dcache_page(page);
-		status = a_ops->commit_write(file, page, offset,
-						offset + copied);
+		status = a_ops->commit_write(file, page, offset, offset+bytes);
 		if (likely(copied > 0)) {
 			if (!status)
 				status = copied;