Commit 72d4cd9f authored by Mike Snitzer, committed by Jens Axboe

block: max hardware sectors limit wrapper

Implement blk_limits_max_hw_sectors() and make
blk_queue_max_hw_sectors() a wrapper around it.

DM needs this to avoid setting queue_limits' max_hw_sectors and
max_sectors directly.  dm_set_device_limits() now leverages
blk_limits_max_hw_sectors() to establish the appropriate
max_hw_sectors minimum (PAGE_SIZE).  This fixes an issue where DM
incorrectly set max_sectors rather than max_hw_sectors, which made
dm_merge_bvec()'s max_hw_sectors check ineffective.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@kernel.org
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent e692cb66
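
For illustration, here is a minimal sketch of the pattern this commit
enables for stacking drivers: capping a composed queue_limits through the
new helper rather than writing max_sectors by hand.  The function name is
hypothetical; blk_limits_max_hw_sectors() and the one-page cap come
straight from the diff below.

    #include <linux/blkdev.h>

    /*
     * Hypothetical stacking-driver hook: mirrors what dm_set_device_limits()
     * does below when an underlying device has a merge_bvec_fn the target
     * cannot honor -- cap I/O at one page worth of 512b sectors.
     */
    static void sample_cap_limits(struct queue_limits *limits)
    {
    	/* Sets max_hw_sectors and derives max_sectors from it. */
    	blk_limits_max_hw_sectors(limits, (unsigned int) (PAGE_SIZE >> 9));
    }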
diff --git a/block/blk-settings.c b/block/blk-settings.c
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -229,8 +229,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q:  the request queue for the device
+ * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
+ * @limits: the queue limits
  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
  *
  * Description:
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
  *    The soft limit can not exceed max_hw_sectors.
  **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
 {
 	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -252,9 +252,23 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
 		       __func__, max_hw_sectors);
 	}
 
-	q->limits.max_hw_sectors = max_hw_sectors;
-	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
-				      BLK_DEF_MAX_SECTORS);
+	limits->max_hw_sectors = max_hw_sectors;
+	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
+				    BLK_DEF_MAX_SECTORS);
+}
+EXPORT_SYMBOL(blk_limits_max_hw_sectors);
+
+/**
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q:  the request queue for the device
+ * @max_hw_sectors:  max hardware sectors in the usual 512b unit
+ *
+ * Description:
+ *    See description for blk_limits_max_hw_sectors().
+ **/
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+{
+	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
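Existing callers are unaffected by the hunk above: blk_queue_max_hw_sectors()
keeps its signature and now simply forwards to the limits-based helper.  A
sketch of the unchanged driver-side call (the queue variable and the sector
count are illustrative, not from this patch):

    /* Driver probe path: the wrapper now expands to
     * blk_limits_max_hw_sectors(&q->limits, 255). */
    blk_queue_max_hw_sectors(q, 255);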
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -517,9 +517,8 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 	 */
 
 	if (q->merge_bvec_fn && !ti->type->merge)
-		limits->max_sectors =
-			min_not_zero(limits->max_sectors,
-				     (unsigned int) (PAGE_SIZE >> 9));
+		blk_limits_max_hw_sectors(limits,
+					  (unsigned int) (PAGE_SIZE >> 9));
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dm_set_device_limits);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -808,6 +808,7 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
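
To see the clamping arithmetic concretely, here is a small user-space model
of blk_limits_max_hw_sectors().  The page size and BLK_DEF_MAX_SECTORS value
are assumptions (4 KiB pages and a 1024-sector soft-limit default), not part
of the patch:

    #include <stdio.h>

    #define PAGE_CACHE_SHIFT    12                       /* assumed 4 KiB pages */
    #define PAGE_CACHE_SIZE     (1UL << PAGE_CACHE_SHIFT)
    #define BLK_DEF_MAX_SECTORS 1024                     /* assumed default */

    struct queue_limits { unsigned int max_hw_sectors, max_sectors; };

    /* Model of the kernel helper: clamp the hard limit to at least one page
     * of 512b sectors, then derive the soft limit, which can never exceed
     * the hard limit. */
    static void limits_max_hw_sectors(struct queue_limits *limits,
                                      unsigned int max_hw_sectors)
    {
        if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE)
            max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);

        limits->max_hw_sectors = max_hw_sectors;
        limits->max_sectors = max_hw_sectors < BLK_DEF_MAX_SECTORS ?
                              max_hw_sectors : BLK_DEF_MAX_SECTORS;
    }

    int main(void)
    {
        struct queue_limits lim;

        limits_max_hw_sectors(&lim, PAGE_CACHE_SIZE >> 9);   /* DM's one-page cap */
        printf("page cap:  hw=%u soft=%u\n", lim.max_hw_sectors, lim.max_sectors);

        limits_max_hw_sectors(&lim, 65535);                  /* large controller */
        printf("large hba: hw=%u soft=%u\n", lim.max_hw_sectors, lim.max_sectors);
        return 0;
    }

Under these assumptions this prints hw=8 soft=8 for DM's one-page cap and
hw=65535 soft=1024 for the large value, illustrating why capping
max_hw_sectors (not just max_sectors) is what makes dm_merge_bvec()'s
check effective.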