Commit 09314799 authored by NeilBrown

md: remove 'go_faster' option from ->sync_request()

This option is not well justified and testing suggests that
it hardly ever makes any difference.

The comment suggests there might be a need to wait for non-resync
activity indicated by ->nr_waiting; however, raise_barrier()
already waits for all of that.
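
(For reference: raid1's raise_barrier() of this era opens by draining that
same queue before resync may proceed. An abridged sketch of the relevant
wait in drivers/md/raid1.c, trimmed rather than quoted verbatim:

static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock);

	/* block any new IO from starting */
	conf->barrier++;
	conf->next_resync = sector_nr;

	spin_unlock_irq(&conf->resync_lock);
}

Since every sync_request() pass takes the barrier, the removed
msleep_interruptible() never waited for anything the barrier did not.)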

So just remove it to simplify reasoning about speed limiting.
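
(Concretely, the dropped argument was the caller-side test
'currspeed < speed_min(mddev)' visible in the md.c hunk below: a
personality was told to "go faster", and so to skip its one-second
throttle sleep, only while resync ran below the configured minimum rate.
A minimal sketch of how that minimum resolves, assuming the usual
per-array override with a global-sysctl fallback:

static inline int speed_min(struct mddev *mddev)
{
	/* per-array sync_speed_min if set, otherwise the
	 * system-wide dev.raid.speed_limit_min sysctl */
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

With the flag gone, pacing is left entirely to md_do_sync()'s existing
speed-limit logic.)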

This allows us to remove a 'FIXME' comment from raid5.c, as that
implementation never used the flag.
Signed-off-by: NeilBrown <neilb@suse.de>
parent 50c37b13
drivers/md/md.c
@@ -7820,8 +7820,7 @@ void md_do_sync(struct md_thread *thread)
 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
 			break;
 
-		sectors = mddev->pers->sync_request(mddev, j, &skipped,
-						  currspeed < speed_min(mddev));
+		sectors = mddev->pers->sync_request(mddev, j, &skipped);
 		if (sectors == 0) {
 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 			break;
@@ -7898,7 +7897,7 @@ void md_do_sync(struct md_thread *thread)
 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
 	/* tell personality that we are finished */
-	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
+	mddev->pers->sync_request(mddev, max_sectors, &skipped);
 
 	if (mddev_is_clustered(mddev))
 		md_cluster_ops->resync_finish(mddev);
drivers/md/md.h
@@ -506,7 +506,7 @@ struct md_personality
 	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
 	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
 	int (*spare_active) (struct mddev *mddev);
-	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster);
+	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
 	int (*resize) (struct mddev *mddev, sector_t sectors);
 	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
 	int (*check_reshape) (struct mddev *mddev);
drivers/md/raid1.c
@@ -2480,7 +2480,7 @@ static int init_resync(struct r1conf *conf)
  * that can be installed to exclude normal IO requests.
  */
 
-static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
 {
 	struct r1conf *conf = mddev->private;
 	struct r1bio *r1_bio;
@@ -2533,13 +2533,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 		*skipped = 1;
 		return sync_blocks;
 	}
-	/*
-	 * If there is non-resync activity waiting for a turn,
-	 * and resync is going fast enough,
-	 * then let it though before starting on this new sync request.
-	 */
-	if (!go_faster && conf->nr_waiting)
-		msleep_interruptible(1000);
 
 	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
 	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
drivers/md/raid10.c
@@ -2889,7 +2889,7 @@ static int init_resync(struct r10conf *conf)
  */
 
 static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
-			     int *skipped, int go_faster)
+			     int *skipped)
 {
 	struct r10conf *conf = mddev->private;
 	struct r10bio *r10_bio;
@@ -2994,12 +2994,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 	if (conf->geo.near_copies < conf->geo.raid_disks &&
 	    max_sector > (sector_nr | chunk_mask))
 		max_sector = (sector_nr | chunk_mask) + 1;
-	/*
-	 * If there is non-resync activity waiting for us then
-	 * put in a delay to throttle resync.
-	 */
-	if (!go_faster && conf->nr_waiting)
-		msleep_interruptible(1000);
 
 	/* Again, very different code for resync and recovery.
 	 * Both must result in an r10bio with a list of bios that
drivers/md/raid5.c
@@ -5050,8 +5050,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
 	return reshape_sectors;
 }
 
-/* FIXME go_faster isn't used */
-static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
 {
 	struct r5conf *conf = mddev->private;
 	struct stripe_head *sh;