Commit b70f86ce authored by Ming Lei, committed by Greg Kroah-Hartman

md: remove 'idx' from 'struct resync_pages'

commit 022e510f upstream.

bio_add_page() won't fail for resync bios, and the page index is the same
for every bio, so 'idx' can be removed.

More importantly, the 'idx' field of 'struct resync_pages' is initialized
in the mempool allocator function. That is wrong: a mempool allocator is
only responsible for allocation, and elements recycled through the pool
never pass through it again, so it cannot be used for per-use
initialization.
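
[Editorial illustration, not part of the patch: a minimal sketch of the
pitfall, using a hypothetical 'elem' type. mempool_alloc() only calls the
allocator callback when it has to create a fresh element; elements returned
with mempool_free() are handed out again as-is, so state set in the callback
is stale on reuse.]

	#include <linux/mempool.h>
	#include <linux/slab.h>

	struct elem {
		int idx;	/* per-use cursor, like the old rp->idx */
	};

	/* Runs only when a brand-new element is created for the pool. */
	static void *elem_alloc(gfp_t gfp_flags, void *data)
	{
		struct elem *e = kmalloc(sizeof(*e), gfp_flags);

		if (e)
			e->idx = 0;	/* never re-run for recycled elements */
		return e;
	}

	static void elem_free(void *element, void *data)
	{
		kfree(element);
	}

	/*
	 * After pool = mempool_create(nr, elem_alloc, elem_free, NULL),
	 * mempool_alloc() may return an element previously released with
	 * mempool_free(); its ->idx then holds whatever the last user left
	 * there.  Per-use state must be initialized by the caller on every
	 * allocation, or kept out of the pooled object entirely, as this
	 * patch does.
	 */
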
Suggested-by: NeilBrown <neilb@suse.com>
Reported-by: NeilBrown <neilb@suse.com>
Reported-and-tested-by: Patrick <dto@gmx.net>
Fixes: f0250618 ("md: raid10: don't use bio's vec table to manage resync pages")
Fixes: 98d30c58 ("md: raid1: don't use bio's vec table to manage resync pages")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent bce72191
drivers/md/md.h
@@ -733,7 +733,6 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
 
 /* for managing resync I/O pages */
 struct resync_pages {
-	unsigned	idx;	/* for get/put page from the pool */
 	void		*raid_bio;
 	struct page	*pages[RESYNC_PAGES];
 };
drivers/md/raid1.c
@@ -170,7 +170,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 		resync_get_all_pages(rp);
 	}
 
-	rp->idx = 0;
 	rp->raid_bio = r1_bio;
 	bio->bi_private = rp;
 }
@@ -2621,6 +2620,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 	int good_sectors = RESYNC_SECTORS;
 	int min_bad = 0; /* number of sectors that are bad in all devices */
 	int idx = sector_to_idx(sector_nr);
+	int page_idx = 0;
 
 	if (!conf->r1buf_pool)
 		if (init_resync(conf))
@@ -2848,7 +2848,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 			bio = r1_bio->bios[i];
 			rp = get_resync_pages(bio);
 			if (bio->bi_end_io) {
-				page = resync_fetch_page(rp, rp->idx++);
+				page = resync_fetch_page(rp, page_idx);
 
 				/*
 				 * won't fail because the vec table is big
@@ -2860,7 +2860,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 		nr_sectors += len>>9;
 		sector_nr += len>>9;
 		sync_blocks -= (len>>9);
-	} while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES);
+	} while (++page_idx < RESYNC_PAGES);
 
 	r1_bio->sectors = nr_sectors;
drivers/md/raid10.c
@@ -221,7 +221,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 		resync_get_all_pages(rp);
 	}
 
-	rp->idx = 0;
 	rp->raid_bio = r10_bio;
 	bio->bi_private = rp;
 	if (rbio) {
@@ -2853,6 +2852,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 	sector_t sectors_skipped = 0;
 	int chunks_skipped = 0;
 	sector_t chunk_mask = conf->geo.chunk_mask;
+	int page_idx = 0;
 
 	if (!conf->r10buf_pool)
 		if (init_resync(conf))
@@ -3355,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			break;
 		for (bio= biolist ; bio ; bio=bio->bi_next) {
 			struct resync_pages *rp = get_resync_pages(bio);
-			page = resync_fetch_page(rp, rp->idx++);
+			page = resync_fetch_page(rp, page_idx);
 			/*
 			 * won't fail because the vec table is big enough
 			 * to hold all these pages
@@ -3364,7 +3364,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 		}
 		nr_sectors += len>>9;
 		sector_nr += len>>9;
-	} while (get_resync_pages(biolist)->idx < RESYNC_PAGES);
+	} while (++page_idx < RESYNC_PAGES);
 
 	r10_bio->sectors = nr_sectors;
 	while (biolist) {
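
[Editorial illustration: a simplified outline of the loop shape both drivers
now share, with names taken from the raid1 hunks above; this is a sketch,
not the complete function. One on-stack page_idx selects the same page slot
in every bio of the set, and bio_add_page() cannot fail because each bio's
vec table was sized for all RESYNC_PAGES pages up front.]

	int page_idx = 0;

	do {
		int len = PAGE_SIZE;	/* clipped against max_sector in the real code */

		for (i = 0; i < conf->raid_disks * 2; i++) {
			bio = r1_bio->bios[i];
			rp = get_resync_pages(bio);
			if (!bio->bi_end_io)
				continue;
			page = resync_fetch_page(rp, page_idx);
			/* won't fail: the vec table holds RESYNC_PAGES entries */
			bio_add_page(bio, page, len, 0);
		}
		nr_sectors += len >> 9;
		sector_nr += len >> 9;
	} while (++page_idx < RESYNC_PAGES);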