Commit e360d2f5 authored by Qu Wenruo, committed by David Sterba

btrfs: scrub: rename scrub_bio::pagev and related members

Since the subpage support for scrub, one page no longer always represents
one sector, thus scrub_bio::pagev and scrub_bio::page_count are no
longer accurate.

Rename them to scrub_bio::sectors and scrub_bio::sector_count respectively.
The rename also covers scrub_ctx::pages_per_bio and the macros involved.

Now the renaming of pages involved in scrub is finished.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 46343501
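
As a side note on why the unit matters (an illustrative sketch, not part
of the commit; the page and sector sizes below are example values): with
subpage support, sectorsize can be smaller than PAGE_SIZE, so one page
may back several sectors, and a per-bio "page count" no longer describes
how much data the bio carries.

	/* Standalone sketch; compiles with any C compiler. */
	#include <stdio.h>

	#define EXAMPLE_PAGE_SIZE	(64 * 1024)	/* e.g. 64KiB pages on ppc64/arm64 */
	#define EXAMPLE_SECTORSIZE	(4 * 1024)	/* 4KiB btrfs sectorsize */
	#define SCRUB_SECTORS_PER_BIO	32		/* same limit as the patch */

	int main(void)
	{
		/* One 64KiB page backs 16 sectors of a 4KiB-sector fs. */
		printf("sectors per page: %d\n",
		       EXAMPLE_PAGE_SIZE / EXAMPLE_SECTORSIZE);

		/* The per-bio limit is really a sector count: 32 * 4KiB =
		 * 128KiB, matching the updated comment in the patch. */
		printf("data per scrub bio: %d KiB\n",
		       SCRUB_SECTORS_PER_BIO * EXAMPLE_SECTORSIZE / 1024);
		return 0;
	}
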
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -45,8 +45,8 @@ struct scrub_ctx;
  * operations. The first one configures an upper limit for the number
  * of (dynamically allocated) pages that are added to a bio.
  */
-#define SCRUB_PAGES_PER_BIO	32	/* 128KiB per bio for x86 */
-#define SCRUB_BIOS_PER_SCTX	64	/* 8MiB per device in flight for x86 */
+#define SCRUB_SECTORS_PER_BIO	32	/* 128KiB per bio for 4KiB pages */
+#define SCRUB_BIOS_PER_SCTX	64	/* 8MiB per device in flight for 4KiB pages */
 
 /*
  * The following value times PAGE_SIZE needs to be large enough to match the
@@ -87,8 +87,8 @@ struct scrub_bio {
 	blk_status_t		status;
 	u64			logical;
 	u64			physical;
-	struct scrub_sector	*pagev[SCRUB_PAGES_PER_BIO];
-	int			page_count;
+	struct scrub_sector	*sectors[SCRUB_SECTORS_PER_BIO];
+	int			sector_count;
 	int			next_free;
 	struct btrfs_work	work;
 };
@@ -158,7 +158,7 @@ struct scrub_ctx {
 	struct list_head	csum_list;
 	atomic_t		cancel_req;
 	int			readonly;
-	int			pages_per_bio;
+	int			sectors_per_bio;
 	/* State of IO submission throttling affecting the associated device */
 	ktime_t			throttle_deadline;
@@ -535,9 +535,9 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 	if (sctx->curr != -1) {
 		struct scrub_bio *sbio = sctx->bios[sctx->curr];
 
-		for (i = 0; i < sbio->page_count; i++) {
-			WARN_ON(!sbio->pagev[i]->page);
-			scrub_block_put(sbio->pagev[i]->sblock);
+		for (i = 0; i < sbio->sector_count; i++) {
+			WARN_ON(!sbio->sectors[i]->page);
+			scrub_block_put(sbio->sectors[i]->sblock);
 		}
 		bio_put(sbio->bio);
 	}
@@ -572,7 +572,7 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
 		goto nomem;
 	refcount_set(&sctx->refs, 1);
 	sctx->is_dev_replace = is_dev_replace;
-	sctx->pages_per_bio = SCRUB_PAGES_PER_BIO;
+	sctx->sectors_per_bio = SCRUB_SECTORS_PER_BIO;
 	sctx->curr = -1;
 	sctx->fs_info = fs_info;
 	INIT_LIST_HEAD(&sctx->csum_list);
@@ -586,7 +586,7 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
 		sbio->index = i;
 		sbio->sctx = sctx;
-		sbio->page_count = 0;
+		sbio->sector_count = 0;
 		btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
 				NULL);
@@ -1643,10 +1643,10 @@ static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
 			return -ENOMEM;
 		}
 		sctx->wr_curr_bio->sctx = sctx;
-		sctx->wr_curr_bio->page_count = 0;
+		sctx->wr_curr_bio->sector_count = 0;
 	}
 	sbio = sctx->wr_curr_bio;
-	if (sbio->page_count == 0) {
+	if (sbio->sector_count == 0) {
 		struct bio *bio;
 
 		ret = fill_writer_pointer_gap(sctx, sector->physical_for_dev_replace);
@@ -1660,7 +1660,7 @@ static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
 		sbio->dev = sctx->wr_tgtdev;
 		bio = sbio->bio;
 		if (!bio) {
-			bio = btrfs_bio_alloc(sctx->pages_per_bio);
+			bio = btrfs_bio_alloc(sctx->sectors_per_bio);
 			sbio->bio = bio;
 		}
@@ -1670,9 +1670,9 @@ static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
 		bio->bi_iter.bi_sector = sbio->physical >> 9;
 		bio->bi_opf = REQ_OP_WRITE;
 		sbio->status = 0;
-	} else if (sbio->physical + sbio->page_count * sectorsize !=
+	} else if (sbio->physical + sbio->sector_count * sectorsize !=
 		   sector->physical_for_dev_replace ||
-		   sbio->logical + sbio->page_count * sectorsize !=
+		   sbio->logical + sbio->sector_count * sectorsize !=
 		   sector->logical) {
 		scrub_wr_submit(sctx);
 		goto again;
@@ -1680,7 +1680,7 @@ static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
 	ret = bio_add_page(sbio->bio, sector->page, sectorsize, 0);
 	if (ret != sectorsize) {
-		if (sbio->page_count < 1) {
+		if (sbio->sector_count < 1) {
 			bio_put(sbio->bio);
 			sbio->bio = NULL;
 			mutex_unlock(&sctx->wr_lock);
@@ -1690,10 +1690,10 @@ static int scrub_add_sector_to_wr_bio(struct scrub_ctx *sctx,
 		goto again;
 	}
 
-	sbio->pagev[sbio->page_count] = sector;
+	sbio->sectors[sbio->sector_count] = sector;
 	scrub_sector_get(sector);
-	sbio->page_count++;
-	if (sbio->page_count == sctx->pages_per_bio)
+	sbio->sector_count++;
+	if (sbio->sector_count == sctx->sectors_per_bio)
 		scrub_wr_submit(sctx);
 	mutex_unlock(&sctx->wr_lock);
@@ -1718,7 +1718,7 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
 	btrfsic_submit_bio(sbio->bio);
 	if (btrfs_is_zoned(sctx->fs_info))
-		sctx->write_pointer = sbio->physical + sbio->page_count *
+		sctx->write_pointer = sbio->physical + sbio->sector_count *
 			sctx->fs_info->sectorsize;
 }
@@ -1740,21 +1740,21 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
 	struct scrub_ctx *sctx = sbio->sctx;
 	int i;
 
-	ASSERT(sbio->page_count <= SCRUB_PAGES_PER_BIO);
+	ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
 	if (sbio->status) {
 		struct btrfs_dev_replace *dev_replace =
 			&sbio->sctx->fs_info->dev_replace;
 
-		for (i = 0; i < sbio->page_count; i++) {
-			struct scrub_sector *sector = sbio->pagev[i];
+		for (i = 0; i < sbio->sector_count; i++) {
+			struct scrub_sector *sector = sbio->sectors[i];
 
 			sector->io_error = 1;
 			atomic64_inc(&dev_replace->num_write_errors);
 		}
 	}
 
-	for (i = 0; i < sbio->page_count; i++)
-		scrub_sector_put(sbio->pagev[i]);
+	for (i = 0; i < sbio->sector_count; i++)
+		scrub_sector_put(sbio->sectors[i]);
 
 	bio_put(sbio->bio);
 	kfree(sbio);
@@ -2070,7 +2070,7 @@ static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
 		if (sctx->curr != -1) {
 			sctx->first_free = sctx->bios[sctx->curr]->next_free;
 			sctx->bios[sctx->curr]->next_free = -1;
-			sctx->bios[sctx->curr]->page_count = 0;
+			sctx->bios[sctx->curr]->sector_count = 0;
 			spin_unlock(&sctx->list_lock);
 		} else {
 			spin_unlock(&sctx->list_lock);
@@ -2078,7 +2078,7 @@ static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
 		}
 	}
 	sbio = sctx->bios[sctx->curr];
-	if (sbio->page_count == 0) {
+	if (sbio->sector_count == 0) {
 		struct bio *bio;
 
 		sbio->physical = sector->physical;
@@ -2086,7 +2086,7 @@ static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
 		sbio->dev = sector->dev;
 		bio = sbio->bio;
 		if (!bio) {
-			bio = btrfs_bio_alloc(sctx->pages_per_bio);
+			bio = btrfs_bio_alloc(sctx->sectors_per_bio);
 			sbio->bio = bio;
 		}
@@ -2096,19 +2096,19 @@ static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
 		bio->bi_iter.bi_sector = sbio->physical >> 9;
 		bio->bi_opf = REQ_OP_READ;
 		sbio->status = 0;
-	} else if (sbio->physical + sbio->page_count * sectorsize !=
+	} else if (sbio->physical + sbio->sector_count * sectorsize !=
 		   sector->physical ||
-		   sbio->logical + sbio->page_count * sectorsize !=
+		   sbio->logical + sbio->sector_count * sectorsize !=
 		   sector->logical ||
 		   sbio->dev != sector->dev) {
 		scrub_submit(sctx);
 		goto again;
 	}
 
-	sbio->pagev[sbio->page_count] = sector;
+	sbio->sectors[sbio->sector_count] = sector;
 	ret = bio_add_page(sbio->bio, sector->page, sectorsize, 0);
 	if (ret != sectorsize) {
-		if (sbio->page_count < 1) {
+		if (sbio->sector_count < 1) {
 			bio_put(sbio->bio);
 			sbio->bio = NULL;
 			return -EIO;
@@ -2119,8 +2119,8 @@ static int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
 	scrub_block_get(sblock); /* one for the page added to the bio */
 	atomic_inc(&sblock->outstanding_sectors);
-	sbio->page_count++;
-	if (sbio->page_count == sctx->pages_per_bio)
+	sbio->sector_count++;
+	if (sbio->sector_count == sctx->sectors_per_bio)
 		scrub_submit(sctx);
 
 	return 0;
@@ -2354,10 +2354,10 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
 	struct scrub_ctx *sctx = sbio->sctx;
 	int i;
 
-	ASSERT(sbio->page_count <= SCRUB_PAGES_PER_BIO);
+	ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
 	if (sbio->status) {
-		for (i = 0; i < sbio->page_count; i++) {
-			struct scrub_sector *sector = sbio->pagev[i];
+		for (i = 0; i < sbio->sector_count; i++) {
+			struct scrub_sector *sector = sbio->sectors[i];
 
 			sector->io_error = 1;
 			sector->sblock->no_io_error_seen = 0;
@@ -2365,8 +2365,8 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
 	}
 
 	/* Now complete the scrub_block items that have all pages completed */
-	for (i = 0; i < sbio->page_count; i++) {
-		struct scrub_sector *sector = sbio->pagev[i];
+	for (i = 0; i < sbio->sector_count; i++) {
+		struct scrub_sector *sector = sbio->sectors[i];
 		struct scrub_block *sblock = sector->sblock;
 
 		if (atomic_dec_and_test(&sblock->outstanding_sectors))
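
For context on the unchanged logic visible above (a minimal sketch with
illustrative names, not kernel code): both the read and write paths keep
appending sectors to the current bio only while each new sector is
physically and logically contiguous with what is already queued (the
read path also requires the same device); otherwise the bio is submitted
and a new one is started.

	#include <stdbool.h>
	#include <stdint.h>

	/* Toy stand-in for the few scrub_bio fields the check uses. */
	struct toy_sbio {
		uint64_t physical;	/* device offset where the bio starts */
		uint64_t logical;	/* logical address where the bio starts */
		int	 sector_count;	/* sectors queued so far */
	};

	/* Mirrors the "else if" condition in scrub_add_sector_to_rd_bio():
	 * the next sector must continue the bio in both address spaces. */
	static bool sector_extends_bio(const struct toy_sbio *sbio,
				       uint64_t physical, uint64_t logical,
				       uint32_t sectorsize)
	{
		uint64_t queued = (uint64_t)sbio->sector_count * sectorsize;

		return sbio->physical + queued == physical &&
		       sbio->logical + queued == logical;
	}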