Commit e16344e5 authored by Christoph Hellwig, committed by Jens Axboe

drbd: merge drbd_setup_queue_param into drbd_reconsider_queue_parameters

drbd_setup_queue_param is only called by drbd_reconsider_queue_parameters
and there is no really clear boundary of responsibilities between the
two.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Philipp Reisner <philipp.reisner@linbit.com>
Reviewed-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Tested-by: Christoph Böhmwalder <christoph.boehmwalder@linbit.com>
Link: https://lore.kernel.org/r/20240306140332.623759-5-philipp.reisner@linbit.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2828908d
......@@ -1309,45 +1309,16 @@ static unsigned int drbd_backing_dev_max_segments(struct drbd_device *device)
return max_segments;
}
/*
 * Apply request-queue limits to @device's own queue, optionally stacking
 * the limits of a local backing device on top.
 *
 * @device:       the DRBD device whose rq_queue is being configured
 * @bdev:         local backing device, or NULL when the node is diskless
 * @max_bio_size: upper bound on bio size in bytes (converted to sectors
 *                via >> 9 below)
 * @o:            peer queue limits; not referenced in this body — presumably
 *                consumed further down the call chain (TODO confirm; removed
 *                together with this function by the merging commit)
 *
 * NOTE(review): this is the removed side of a diff; the body below was
 * merged into drbd_reconsider_queue_parameters by this commit.
 */
static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
unsigned int max_bio_size, struct o_qlim *o)
{
struct request_queue * const q = device->rq_queue;
/* Defaults for the diskless case: limited only by max_bio_size. */
unsigned int max_hw_sectors = max_bio_size >> 9;
unsigned int max_segments = BLK_MAX_SEGMENTS;
struct request_queue *b = NULL;
if (bdev) {
b = bdev->backing_bdev->bd_disk->queue;
/* Never exceed what the backing device's queue can take. */
max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
max_segments = drbd_backing_dev_max_segments(device);
/* Reset to stacking defaults before re-applying limits below. */
blk_set_stacking_limits(&q->limits);
}
blk_queue_max_hw_sectors(q, max_hw_sectors);
blk_queue_max_segments(q, max_segments);
blk_queue_segment_boundary(q, PAGE_SIZE-1);
decide_on_discard_support(device, bdev);
if (b) {
/* Fold the backing queue's limits into ours, then refresh readahead. */
blk_stack_limits(&q->limits, &b->limits, 0);
disk_update_readahead(device->vdisk);
}
fixup_write_zeroes(device, q);
fixup_discard_support(device, q);
}
void drbd_reconsider_queue_parameters(struct drbd_device *device,
struct drbd_backing_dev *bdev, struct o_qlim *o)
{
unsigned int now = queue_max_hw_sectors(device->rq_queue) <<
SECTOR_SHIFT;
struct request_queue * const q = device->rq_queue;
unsigned int now = queue_max_hw_sectors(q) << 9;
struct request_queue *b = NULL;
unsigned int new;
if (bdev) {
struct request_queue *b = bdev->backing_bdev->bd_disk->queue;
b = bdev->backing_bdev->bd_disk->queue;
device->local_max_bio_size =
queue_max_hw_sectors(b) << SECTOR_SHIFT;
......@@ -1369,7 +1340,24 @@ void drbd_reconsider_queue_parameters(struct drbd_device *device,
drbd_info(device, "max BIO size = %u\n", new);
}
drbd_setup_queue_param(device, bdev, new, o);
if (bdev) {
blk_set_stacking_limits(&q->limits);
blk_queue_max_segments(q,
drbd_backing_dev_max_segments(device));
} else {
blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
}
blk_queue_max_hw_sectors(q, new >> SECTOR_SHIFT);
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
decide_on_discard_support(device, bdev);
if (bdev) {
blk_stack_limits(&q->limits, &b->limits, 0);
disk_update_readahead(device->vdisk);
}
fixup_write_zeroes(device, q);
fixup_discard_support(device, q);
}
/* Starts the worker thread */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.