Commit cc4d1efd authored by Jonathan Brassow's avatar Jonathan Brassow Committed by NeilBrown

MD RAID10: Export md_raid10_congested

md/raid10: Export is_congested test.

In similar fashion to commits
	11d8a6e3
	1ed7242e
we export the RAID10 congestion checking function so that dm-raid.c can
make use of it when it employs the RAID10 personality.  The 'queue' and
'gendisk' structures will not be available to the MD code when
device-mapper sets up the device, so we conditionalize access to these
fields as well.
Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: NeilBrown <neilb@suse.de>
parent 473e87ce
...@@ -853,9 +853,8 @@ static struct md_rdev *read_balance(struct r10conf *conf, ...@@ -853,9 +853,8 @@ static struct md_rdev *read_balance(struct r10conf *conf,
return rdev; return rdev;
} }
static int raid10_congested(void *data, int bits) int md_raid10_congested(struct mddev *mddev, int bits)
{ {
struct mddev *mddev = data;
struct r10conf *conf = mddev->private; struct r10conf *conf = mddev->private;
int i, ret = 0; int i, ret = 0;
...@@ -863,8 +862,6 @@ static int raid10_congested(void *data, int bits) ...@@ -863,8 +862,6 @@ static int raid10_congested(void *data, int bits)
conf->pending_count >= max_queued_requests) conf->pending_count >= max_queued_requests)
return 1; return 1;
if (mddev_congested(mddev, bits))
return 1;
rcu_read_lock(); rcu_read_lock();
for (i = 0; for (i = 0;
(i < conf->geo.raid_disks || i < conf->prev.raid_disks) (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
...@@ -880,6 +877,15 @@ static int raid10_congested(void *data, int bits) ...@@ -880,6 +877,15 @@ static int raid10_congested(void *data, int bits)
rcu_read_unlock(); rcu_read_unlock();
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(md_raid10_congested);
/*
 * Congestion callback installed in backing_dev_info.congested_fn.
 * 'data' is the mddev (set via congested_data in run()); combines the
 * MD-core congestion state with the exported RAID10-specific check.
 * The || deliberately collapses the result to 0/1.
 */
static int raid10_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	int congested = mddev_congested(mddev, bits) ||
			md_raid10_congested(mddev, bits);

	return congested;
}
static void flush_pending_writes(struct r10conf *conf) static void flush_pending_writes(struct r10conf *conf)
{ {
...@@ -3486,12 +3492,14 @@ static int run(struct mddev *mddev) ...@@ -3486,12 +3492,14 @@ static int run(struct mddev *mddev)
conf->thread = NULL; conf->thread = NULL;
chunk_size = mddev->chunk_sectors << 9; chunk_size = mddev->chunk_sectors << 9;
if (mddev->queue) {
blk_queue_io_min(mddev->queue, chunk_size); blk_queue_io_min(mddev->queue, chunk_size);
if (conf->geo.raid_disks % conf->geo.near_copies) if (conf->geo.raid_disks % conf->geo.near_copies)
blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
else else
blk_queue_io_opt(mddev->queue, chunk_size * blk_queue_io_opt(mddev->queue, chunk_size *
(conf->geo.raid_disks / conf->geo.near_copies)); (conf->geo.raid_disks / conf->geo.near_copies));
}
rdev_for_each(rdev, mddev) { rdev_for_each(rdev, mddev) {
long long diff; long long diff;
...@@ -3525,6 +3533,7 @@ static int run(struct mddev *mddev) ...@@ -3525,6 +3533,7 @@ static int run(struct mddev *mddev)
if (first || diff < min_offset_diff) if (first || diff < min_offset_diff)
min_offset_diff = diff; min_offset_diff = diff;
if (mddev->gendisk)
disk_stack_limits(mddev->gendisk, rdev->bdev, disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9); rdev->data_offset << 9);
...@@ -3589,6 +3598,9 @@ static int run(struct mddev *mddev) ...@@ -3589,6 +3598,9 @@ static int run(struct mddev *mddev)
md_set_array_sectors(mddev, size); md_set_array_sectors(mddev, size);
mddev->resync_max_sectors = size; mddev->resync_max_sectors = size;
if (mddev->queue) {
int stripe = conf->geo.raid_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE);
mddev->queue->backing_dev_info.congested_fn = raid10_congested; mddev->queue->backing_dev_info.congested_fn = raid10_congested;
mddev->queue->backing_dev_info.congested_data = mddev; mddev->queue->backing_dev_info.congested_data = mddev;
...@@ -3596,15 +3608,12 @@ static int run(struct mddev *mddev) ...@@ -3596,15 +3608,12 @@ static int run(struct mddev *mddev)
* We need to readahead at least twice a whole stripe.... * We need to readahead at least twice a whole stripe....
* maybe... * maybe...
*/ */
{
int stripe = conf->geo.raid_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE);
stripe /= conf->geo.near_copies; stripe /= conf->geo.near_copies;
if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
mddev->queue->backing_dev_info.ra_pages = 2 * stripe; mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
} }
blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
if (md_integrity_register(mddev)) if (md_integrity_register(mddev))
goto out_free_conf; goto out_free_conf;
...@@ -3655,7 +3664,10 @@ static int stop(struct mddev *mddev) ...@@ -3655,7 +3664,10 @@ static int stop(struct mddev *mddev)
lower_barrier(conf); lower_barrier(conf);
md_unregister_thread(&mddev->thread); md_unregister_thread(&mddev->thread);
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ if (mddev->queue)
/* the unplug fn references 'conf'*/
blk_sync_queue(mddev->queue);
if (conf->r10bio_pool) if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool); mempool_destroy(conf->r10bio_pool);
kfree(conf->mirrors); kfree(conf->mirrors);
......
...@@ -145,4 +145,7 @@ enum r10bio_state { ...@@ -145,4 +145,7 @@ enum r10bio_state {
*/ */
R10BIO_Previous, R10BIO_Previous,
}; };
extern int md_raid10_congested(struct mddev *mddev, int bits);
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment