Commit b1bf862e authored by Chris Mason's avatar Chris Mason

Btrfs: fix regressions in copy_from_user handling

Commit 914ee295 fixed deadlocks in
btrfs_file_write where we would catch page faults on pages we had
locked.

But, there were a few problems:

1) The x86-32 iov_iter_copy_from_user_atomic code always fails to copy
data when the amount to copy is more than 4K and the offset to start
copying from is not page aligned.  The result was btrfs_file_write
looping forever retrying the iov_iter_copy_from_user_atomic

We deal with this by changing btrfs_file_write to drop down to single
page copies when iov_iter_copy_from_user_atomic starts returning failure.

2) The btrfs_file_write code was leaking delalloc reservations when
iov_iter_copy_from_user_atomic returned zero.  The looping above would
result in the entire filesystem running out of delalloc reservations and
constantly trying to flush things to disk.

3) btrfs_file_write will lock down page cache pages, make sure
any writeback is finished, do the copy_from_user and then release them.
Before the loop runs we check the first and last pages in the write to
see if they are only being partially modified.  If the start or end of
the write isn't aligned, we make sure the corresponding pages are
up to date so that we don't introduce garbage into the file.

With the copy_from_user changes, we're allowing the VM to reclaim the
pages after a partial update from copy_from_user, but we're not
making sure the page cache page is up to date when we loop around to
resume the write.

We deal with this by pushing the up to date checks down into the page
prep code.  This fits better with how the rest of file_write works.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Reported-by: Mitch Harder <mitch.harder@sabayonlinux.org>
cc: stable@kernel.org
parent ec29ed5b
...@@ -761,6 +761,27 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, ...@@ -761,6 +761,27 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
return 0; return 0;
} }
/*
* on error we return an unlocked page and the error value
* on success we return a locked page and 0
*/
static int prepare_uptodate_page(struct page *page, u64 pos)
{
int ret = 0;
if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) {
ret = btrfs_readpage(NULL, page);
if (ret)
return ret;
lock_page(page);
if (!PageUptodate(page)) {
unlock_page(page);
return -EIO;
}
}
return 0;
}
/* /*
* this gets pages into the page cache and locks them down, it also properly * this gets pages into the page cache and locks them down, it also properly
* waits for data=ordered extents to finish before allowing the pages to be * waits for data=ordered extents to finish before allowing the pages to be
...@@ -776,6 +797,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file, ...@@ -776,6 +797,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
unsigned long index = pos >> PAGE_CACHE_SHIFT; unsigned long index = pos >> PAGE_CACHE_SHIFT;
struct inode *inode = fdentry(file)->d_inode; struct inode *inode = fdentry(file)->d_inode;
int err = 0; int err = 0;
int faili = 0;
u64 start_pos; u64 start_pos;
u64 last_pos; u64 last_pos;
...@@ -793,15 +815,24 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file, ...@@ -793,15 +815,24 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
for (i = 0; i < num_pages; i++) { for (i = 0; i < num_pages; i++) {
pages[i] = grab_cache_page(inode->i_mapping, index + i); pages[i] = grab_cache_page(inode->i_mapping, index + i);
if (!pages[i]) { if (!pages[i]) {
int c; faili = i - 1;
for (c = i - 1; c >= 0; c--) { err = -ENOMEM;
unlock_page(pages[c]); goto fail;
page_cache_release(pages[c]); }
}
return -ENOMEM; if (i == 0)
err = prepare_uptodate_page(pages[i], pos);
if (i == num_pages - 1)
err = prepare_uptodate_page(pages[i],
pos + write_bytes);
if (err) {
page_cache_release(pages[i]);
faili = i - 1;
goto fail;
} }
wait_on_page_writeback(pages[i]); wait_on_page_writeback(pages[i]);
} }
err = 0;
if (start_pos < inode->i_size) { if (start_pos < inode->i_size) {
struct btrfs_ordered_extent *ordered; struct btrfs_ordered_extent *ordered;
lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_extent_bits(&BTRFS_I(inode)->io_tree,
...@@ -841,6 +872,14 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file, ...@@ -841,6 +872,14 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
WARN_ON(!PageLocked(pages[i])); WARN_ON(!PageLocked(pages[i]));
} }
return 0; return 0;
fail:
while (faili >= 0) {
unlock_page(pages[faili]);
page_cache_release(pages[faili]);
faili--;
}
return err;
} }
static ssize_t btrfs_file_aio_write(struct kiocb *iocb, static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
...@@ -850,7 +889,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, ...@@ -850,7 +889,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
struct file *file = iocb->ki_filp; struct file *file = iocb->ki_filp;
struct inode *inode = fdentry(file)->d_inode; struct inode *inode = fdentry(file)->d_inode;
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct page *pinned[2];
struct page **pages = NULL; struct page **pages = NULL;
struct iov_iter i; struct iov_iter i;
loff_t *ppos = &iocb->ki_pos; loff_t *ppos = &iocb->ki_pos;
...@@ -871,9 +909,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, ...@@ -871,9 +909,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
(file->f_flags & O_DIRECT)); (file->f_flags & O_DIRECT));
pinned[0] = NULL;
pinned[1] = NULL;
start_pos = pos; start_pos = pos;
vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
...@@ -961,32 +996,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, ...@@ -961,32 +996,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
first_index = pos >> PAGE_CACHE_SHIFT; first_index = pos >> PAGE_CACHE_SHIFT;
last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT; last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;
/*
* there are lots of better ways to do this, but this code
* makes sure the first and last page in the file range are
* up to date and ready for cow
*/
if ((pos & (PAGE_CACHE_SIZE - 1))) {
pinned[0] = grab_cache_page(inode->i_mapping, first_index);
if (!PageUptodate(pinned[0])) {
ret = btrfs_readpage(NULL, pinned[0]);
BUG_ON(ret);
wait_on_page_locked(pinned[0]);
} else {
unlock_page(pinned[0]);
}
}
if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) {
pinned[1] = grab_cache_page(inode->i_mapping, last_index);
if (!PageUptodate(pinned[1])) {
ret = btrfs_readpage(NULL, pinned[1]);
BUG_ON(ret);
wait_on_page_locked(pinned[1]);
} else {
unlock_page(pinned[1]);
}
}
while (iov_iter_count(&i) > 0) { while (iov_iter_count(&i) > 0) {
size_t offset = pos & (PAGE_CACHE_SIZE - 1); size_t offset = pos & (PAGE_CACHE_SIZE - 1);
size_t write_bytes = min(iov_iter_count(&i), size_t write_bytes = min(iov_iter_count(&i),
...@@ -1023,8 +1032,20 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, ...@@ -1023,8 +1032,20 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
copied = btrfs_copy_from_user(pos, num_pages, copied = btrfs_copy_from_user(pos, num_pages,
write_bytes, pages, &i); write_bytes, pages, &i);
dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT; /*
* if we have trouble faulting in the pages, fall
* back to one page at a time
*/
if (copied < write_bytes)
nrptrs = 1;
if (copied == 0)
dirty_pages = 0;
else
dirty_pages = (copied + offset +
PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT;
if (num_pages > dirty_pages) { if (num_pages > dirty_pages) {
if (copied > 0) if (copied > 0)
...@@ -1068,10 +1089,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, ...@@ -1068,10 +1089,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
err = ret; err = ret;
kfree(pages); kfree(pages);
if (pinned[0])
page_cache_release(pinned[0]);
if (pinned[1])
page_cache_release(pinned[1]);
*ppos = pos; *ppos = pos;
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment