Commit b1bd055d authored by Martin K. Petersen, committed by Jens Axboe

block: Introduce blk_set_stacking_limits function

Stacking driver queue limits are typically bounded exclusively by the
capabilities of the low level devices, not by the stacking driver
itself.

This patch introduces blk_set_stacking_limits() which has more liberal
metrics than the default queue limits function. This allows us to
inherit topology parameters from bottom devices without manually
tweaking the default limits in each driver prior to calling the stacking
function.
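
As an illustration (not part of this patch), a minimal sketch of how a stacking driver might now set up its limits; my_stack_dev, my_stack_reset_limits and my_stack_add_component are made-up names, while blk_set_stacking_limits() and bdev_stack_limits() are the block layer helpers referenced here:

    #include <linux/blkdev.h>

    /* Hypothetical per-device state for an imaginary stacking driver. */
    struct my_stack_dev {
            struct request_queue *queue;
            struct queue_limits limits;
    };

    /* Seed the aggregate limits with the permissive stacking defaults so
     * the stacking driver itself never clamps what its component devices
     * can do. */
    static void my_stack_reset_limits(struct my_stack_dev *sd)
    {
            blk_set_stacking_limits(&sd->limits);
    }

    /* Fold one component device's limits into the aggregate, the way DM
     * and MD combine the limits of their underlying devices. */
    static int my_stack_add_component(struct my_stack_dev *sd,
                                      struct block_device *bdev,
                                      sector_t start)
    {
            return bdev_stack_limits(&sd->limits, bdev, start);
    }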

Since there is now a clear distinction between stacking and low-level
devices, blk_set_default_limits() has been modified to carry the more
conservative values that we used to manually set in
blk_queue_make_request().

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c98b2cc2
@@ -104,9 +104,7 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
  * @lim: the queue_limits structure to reset
  *
  * Description:
- *   Returns a queue_limit struct to its default state. Can be used by
- *   stacking drivers like DM that stage table swaps and reuse an
- *   existing device queue.
+ *   Returns a queue_limit struct to its default state.
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
@@ -114,13 +112,12 @@ void blk_set_default_limits(struct queue_limits *lim)
         lim->max_integrity_segments = 0;
         lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
         lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
-        lim->max_sectors = BLK_DEF_MAX_SECTORS;
-        lim->max_hw_sectors = INT_MAX;
+        lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
         lim->max_discard_sectors = 0;
         lim->discard_granularity = 0;
         lim->discard_alignment = 0;
         lim->discard_misaligned = 0;
-        lim->discard_zeroes_data = 1;
+        lim->discard_zeroes_data = 0;
         lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
         lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
         lim->alignment_offset = 0;
@@ -130,6 +127,27 @@ void blk_set_default_limits(struct queue_limits *lim)
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
+/**
+ * blk_set_stacking_limits - set default limits for stacking devices
+ * @lim: the queue_limits structure to reset
+ *
+ * Description:
+ *   Returns a queue_limit struct to its default state. Should be used
+ *   by stacking drivers like DM that have no internal limits.
+ */
+void blk_set_stacking_limits(struct queue_limits *lim)
+{
+        blk_set_default_limits(lim);
+
+        /* Inherit limits from component devices */
+        lim->discard_zeroes_data = 1;
+        lim->max_segments = USHRT_MAX;
+        lim->max_hw_sectors = UINT_MAX;
+        lim->max_sectors = BLK_DEF_MAX_SECTORS;
+}
+EXPORT_SYMBOL(blk_set_stacking_limits);
+
 /**
  * blk_queue_make_request - define an alternate make_request function for a device
  * @q: the request queue for the device to be affected
@@ -165,8 +183,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
         q->nr_batching = BLK_BATCH_REQ;
 
         blk_set_default_limits(&q->limits);
-        blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
-        q->limits.discard_zeroes_data = 0;
 
         /*
          * by default assume old behaviour and bounce for any highmem page
......
@@ -699,7 +699,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
         while (i < dm_table_get_num_targets(table)) {
                 ti = dm_table_get_target(table, i++);
 
-                blk_set_default_limits(&ti_limits);
+                blk_set_stacking_limits(&ti_limits);
 
                 /* combine all target devices' limits */
                 if (ti->type->iterate_devices)
@@ -1221,10 +1221,10 @@ int dm_calculate_queue_limits(struct dm_table *table,
         struct queue_limits ti_limits;
         unsigned i = 0;
 
-        blk_set_default_limits(limits);
+        blk_set_stacking_limits(limits);
 
         while (i < dm_table_get_num_targets(table)) {
-                blk_set_default_limits(&ti_limits);
+                blk_set_stacking_limits(&ti_limits);
 
                 ti = dm_table_get_target(table, i++);
......
@@ -4622,6 +4622,7 @@ static int md_alloc(dev_t dev, char *name)
         mddev->queue->queuedata = mddev;
 
         blk_queue_make_request(mddev->queue, md_make_request);
+        blk_set_stacking_limits(&mddev->queue->limits);
 
         disk = alloc_disk(1 << shift);
         if (!disk) {
......
@@ -844,6 +844,7 @@ extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
+extern void blk_set_stacking_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                             sector_t offset);
 extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
......