Commit 13db62b7 authored by Jan Schmidt

btrfs scrub: added unverified_errors

In normal operation, scrub reads data sequentially in large portions.
In case of an i/o error, we try to find the corrupted area(s) by issuing
page-sized read requests. With this commit we increment the
unverified_errors counter if all of these small read requests succeed.

Userland patches that carry such conspicuous events to the administrator should
already be around.
Signed-off-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
parent a542ad1b
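For illustration only, here is a minimal user-space sketch of the accounting logic this commit introduces. It is not kernel code, and every name in it (scrub_stats, recheck_page, handle_failed_bio, the stubbed reread/checksum helpers) is made up. It mirrors the idea: after a large read fails, each page is re-read on its own; pages that stay bad count as read errors, and only if every per-page reread verifies is the event counted as unverified.

#include <stdbool.h>
#include <stdio.h>

#define PAGES_PER_BIO 16

struct scrub_stats {
	unsigned long read_errors;        /* at least one page stayed bad */
	unsigned long unverified_errors;  /* error vanished on per-page reread */
};

/* Hypothetical stand-ins for the per-page reread and checksum verification. */
static bool reread_page_ok(int page_index) { (void)page_index; return true; }
static bool checksum_ok(int page_index)    { (void)page_index; return true; }

/* Returns 1 if the page is still bad after the reread, 0 if it verified. */
static int recheck_page(struct scrub_stats *stats, int page_index)
{
	if (reread_page_ok(page_index) && checksum_ok(page_index))
		return 0;

	++stats->read_errors;
	/* a real implementation would try to repair the page here */
	return 1;
}

/* Called when the large sequential read reported an i/o error. */
static void handle_failed_bio(struct scrub_stats *stats)
{
	int bad = 0;
	int i;

	for (i = 0; i < PAGES_PER_BIO; ++i)
		bad |= recheck_page(stats, i);

	/* every small read succeeded and verified: count it as unverified */
	if (!bad)
		++stats->unverified_errors;
}

int main(void)
{
	struct scrub_stats stats = { 0, 0 };

	handle_failed_bio(&stats);
	printf("read_errors=%lu unverified_errors=%lu\n",
	       stats.read_errors, stats.unverified_errors);
	return 0;
}

Structured this way, each i/o error bumps exactly one of the two counters, which is also the invariant the diff below establishes: the read_errors update moves into scrub_recheck_error(), and the callers count unverified_errors only when no page stayed bad.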
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -201,18 +201,25 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
  * recheck_error gets called for every page in the bio, even though only
  * one may be bad
  */
-static void scrub_recheck_error(struct scrub_bio *sbio, int ix)
+static int scrub_recheck_error(struct scrub_bio *sbio, int ix)
 {
+	struct scrub_dev *sdev = sbio->sdev;
+	u64 sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
+
 	if (sbio->err) {
-		if (scrub_fixup_io(READ, sbio->sdev->dev->bdev,
-				   (sbio->physical + ix * PAGE_SIZE) >> 9,
+		if (scrub_fixup_io(READ, sbio->sdev->dev->bdev, sector,
 				   sbio->bio->bi_io_vec[ix].bv_page) == 0) {
 			if (scrub_fixup_check(sbio, ix) == 0)
-				return;
+				return 0;
 		}
 	}
 
+	spin_lock(&sdev->stat_lock);
+	++sdev->stat.read_errors;
+	spin_unlock(&sdev->stat_lock);
+
 	scrub_fixup(sbio, ix);
+	return 1;
 }
 
 static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
@@ -382,8 +389,14 @@ static void scrub_checksum(struct btrfs_work *work)
 	int ret;
 
 	if (sbio->err) {
+		ret = 0;
 		for (i = 0; i < sbio->count; ++i)
-			scrub_recheck_error(sbio, i);
+			ret |= scrub_recheck_error(sbio, i);
+		if (!ret) {
+			spin_lock(&sdev->stat_lock);
+			++sdev->stat.unverified_errors;
+			spin_unlock(&sdev->stat_lock);
+		}
 
 		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
 		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
@@ -396,10 +409,6 @@ static void scrub_checksum(struct btrfs_work *work)
 			bi->bv_offset = 0;
 			bi->bv_len = PAGE_SIZE;
 		}
-
-		spin_lock(&sdev->stat_lock);
-		++sdev->stat.read_errors;
-		spin_unlock(&sdev->stat_lock);
 		goto out;
 	}
 	for (i = 0; i < sbio->count; ++i) {
@@ -420,8 +429,14 @@ static void scrub_checksum(struct btrfs_work *work)
 			WARN_ON(1);
 		}
 		kunmap_atomic(buffer, KM_USER0);
-		if (ret)
-			scrub_recheck_error(sbio, i);
+		if (ret) {
+			ret = scrub_recheck_error(sbio, i);
+			if (!ret) {
+				spin_lock(&sdev->stat_lock);
+				++sdev->stat.unverified_errors;
+				spin_unlock(&sdev->stat_lock);
+			}
+		}
 	}
 
 out: