Commit 9d0d1c8b authored by Liu Bo's avatar Liu Bo Committed by David Sterba

Btrfs: bring back repair during read

Commit 20a7db8a ("btrfs: add dummy callback for readpage_io_failed
and drop checks") made a cleanup around readpage_io_failed_hook, and
it was supposed to keep the original semantics, but it also
unexpectedly disabled repair during read for dup, raid1 and raid10.

This fixes the problem by letting data's inode call the generic
readpage_io_failed callback by returning -EAGAIN from its
readpage_io_failed_hook in order to notify end_bio_extent_readpage to
do the rest.  We don't call it directly because the generic one takes
an offset from end_bio_extent_readpage() to calculate the index in the
checksum array and inode's readpage_io_failed_hook doesn't offer that
offset.

Cc: David Sterba <dsterba@suse.cz>
Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ keep the const function attribute ]
Signed-off-by: David Sterba <dsterba@suse.com>
parent e1699d2d
...@@ -2584,27 +2584,37 @@ static void end_bio_extent_readpage(struct bio *bio) ...@@ -2584,27 +2584,37 @@ static void end_bio_extent_readpage(struct bio *bio)
if (tree->ops) { if (tree->ops) {
ret = tree->ops->readpage_io_failed_hook(page, mirror); ret = tree->ops->readpage_io_failed_hook(page, mirror);
if (!ret && !bio->bi_error) if (ret == -EAGAIN) {
uptodate = 1;
} else {
/* /*
* The generic bio_readpage_error handles errors the * Data inode's readpage_io_failed_hook() always
* following way: If possible, new read requests are * returns -EAGAIN.
* created and submitted and will end up in *
* end_bio_extent_readpage as well (if we're lucky, not * The generic bio_readpage_error handles errors
* in the !uptodate case). In that case it returns 0 and * the following way: If possible, new read
* we just go on with the next page in our bio. If it * requests are created and submitted and will
* can't handle the error it will return -EIO and we * end up in end_bio_extent_readpage as well (if
* remain responsible for that page. * we're lucky, not in the !uptodate case). In
*/ * that case it returns 0 and we just go on with
ret = bio_readpage_error(bio, offset, page, start, end, * the next page in our bio. If it can't handle
mirror); * the error it will return -EIO and we remain
* responsible for that page.
*/
ret = bio_readpage_error(bio, offset, page,
start, end, mirror);
if (ret == 0) { if (ret == 0) {
uptodate = !bio->bi_error; uptodate = !bio->bi_error;
offset += len; offset += len;
continue; continue;
} }
} }
/*
* metadata's readpage_io_failed_hook() always returns
* -EIO and fixes nothing. -EIO is also returned if
* data inode error could not be fixed.
*/
ASSERT(ret == -EIO);
}
readpage_ok: readpage_ok:
if (likely(uptodate)) { if (likely(uptodate)) {
loff_t i_size = i_size_read(inode); loff_t i_size = i_size_read(inode);
......
...@@ -10523,9 +10523,9 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) ...@@ -10523,9 +10523,9 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
} }
__attribute__((const)) __attribute__((const))
static int dummy_readpage_io_failed_hook(struct page *page, int failed_mirror) static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
{ {
return 0; return -EAGAIN;
} }
static const struct inode_operations btrfs_dir_inode_operations = { static const struct inode_operations btrfs_dir_inode_operations = {
...@@ -10570,7 +10570,7 @@ static const struct extent_io_ops btrfs_extent_io_ops = { ...@@ -10570,7 +10570,7 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
.submit_bio_hook = btrfs_submit_bio_hook, .submit_bio_hook = btrfs_submit_bio_hook,
.readpage_end_io_hook = btrfs_readpage_end_io_hook, .readpage_end_io_hook = btrfs_readpage_end_io_hook,
.merge_bio_hook = btrfs_merge_bio_hook, .merge_bio_hook = btrfs_merge_bio_hook,
.readpage_io_failed_hook = dummy_readpage_io_failed_hook, .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
/* optional callbacks */ /* optional callbacks */
.fill_delalloc = run_delalloc_range, .fill_delalloc = run_delalloc_range,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment