Commit 762380ad authored by Jens Axboe

block: add notion of a chunk size for request merging

Some drivers have different limits on what size a request should
optimally be, depending on the offset of the request; this is similar
to dividing the device into chunks. Add a setting that allows the
driver to inform the block layer of such a chunk size. The block
layer will then prevent merging across chunk boundaries.

This is needed to optimally support NVMe with a non-zero stripe size.
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 046f1533
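The boundary check added in the diff below hinges on chunk_sectors being a power of two, so the in-chunk offset can be computed with a mask instead of a division. A quick userspace sketch of that arithmetic (illustrative only, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long off = 200;	/* request start, in 512b sectors */
		unsigned int chunk = 128;	/* power-of-2 chunk: 64KB */

		/* For a power-of-2 chunk, off & (chunk - 1) equals off % chunk,
		 * which is why the patch requires power-of-2 chunk sizes. */
		printf("offset within chunk: %llu\n", off & (chunk - 1));		/* 72 */
		printf("room before boundary: %llu\n", chunk - (off & (chunk - 1)));	/* 56 */
		return 0;
	}

A request starting at sector 200 sits 72 sectors into its 128-sector chunk, so only 56 sectors remain before the boundary and merging must stop there.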
block/bio.c
@@ -849,7 +849,8 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 		 unsigned int offset)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
+	return __bio_add_page(q, bio, page, len, offset,
+			      blk_max_size_offset(q, bio->bi_iter.bi_sector));
 }
 EXPORT_SYMBOL(bio_add_page);
block/blk-settings.c
@@ -113,6 +113,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+	lim->chunk_sectors = 0;
 	lim->max_write_same_sectors = 0;
 	lim->max_discard_sectors = 0;
 	lim->discard_granularity = 0;
@@ -276,6 +277,23 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
+/**
+ * blk_queue_chunk_sectors - set size of the chunk for this queue
+ * @q:  the request queue for the device
+ * @chunk_sectors:  chunk sectors in the usual 512b unit
+ *
+ * Description:
+ *    If a driver doesn't want IOs to cross a given chunk size, it can set
+ *    this limit and prevent merging across chunks. Note that the chunk size
+ *    must currently be a power-of-2 in sectors.
+ **/
+void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
+{
+	BUG_ON(!is_power_of_2(chunk_sectors));
+	q->limits.chunk_sectors = chunk_sectors;
+}
+EXPORT_SYMBOL(blk_queue_chunk_sectors);
+
 /**
  * blk_queue_max_discard_sectors - set max sectors for a single discard
  * @q:  the request queue for the device
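For reference, a driver would register the limit during queue setup roughly as below. Only blk_queue_chunk_sectors() comes from this patch; the surrounding function and names (mydev_init_queue, stripe_bytes) are hypothetical scaffolding:

	/* Hypothetical driver setup: keep I/O from being merged across
	 * the device's stripes. Only the blk_queue_chunk_sectors() call
	 * is from this patch; everything else here is illustrative. */
	static void mydev_init_queue(struct request_queue *q,
				     unsigned int stripe_bytes)
	{
		if (stripe_bytes)
			/* bytes -> 512b sectors; must be a power of 2,
			 * or the BUG_ON() above fires */
			blk_queue_chunk_sectors(q, stripe_bytes >> 9);
	}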
include/linux/blkdev.h
@@ -280,6 +280,7 @@ struct queue_limits {
 	unsigned long		seg_boundary_mask;
 
 	unsigned int		max_hw_sectors;
+	unsigned int		chunk_sectors;
 	unsigned int		max_sectors;
 	unsigned int		max_segment_size;
 	unsigned int		physical_block_size;
@@ -910,6 +911,20 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 	return q->limits.max_sectors;
 }
 
+/*
+ * Return maximum size of a request at given offset. Only valid for
+ * file system requests.
+ */
+static inline unsigned int blk_max_size_offset(struct request_queue *q,
+					       sector_t offset)
+{
+	if (!q->limits.chunk_sectors)
+		return q->limits.max_hw_sectors;
+
+	return q->limits.chunk_sectors -
+			(offset & (q->limits.chunk_sectors - 1));
+}
+
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 {
 	struct request_queue *q = rq->q;
@@ -917,7 +932,11 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
 		return q->limits.max_hw_sectors;
 
-	return blk_queue_get_max_sectors(q, rq->cmd_flags);
+	if (!q->limits.chunk_sectors)
+		return blk_queue_get_max_sectors(q, rq->cmd_flags);
+
+	return min(blk_max_size_offset(q, blk_rq_pos(rq)),
+			blk_queue_get_max_sectors(q, rq->cmd_flags));
 }
 
 static inline unsigned int blk_rq_count_bios(struct request *rq)
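The net effect of the new branch can be modeled outside the kernel: the per-request cap becomes the smaller of the usual queue limit and the room left in the current chunk. A minimal sketch, assuming a 512-sector queue limit and 128-sector chunks (all names here are made up for illustration):

	#include <stdio.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	/* Model of the patched blk_rq_get_max_sectors(): with chunking
	 * enabled, a request starting at pos may not grow past the next
	 * chunk boundary even if the queue limit would allow more. */
	static unsigned int max_sectors_at(unsigned int queue_max,
					   unsigned int chunk_sectors,
					   unsigned long long pos)
	{
		if (!chunk_sectors)
			return queue_max;	/* no chunking: old behavior */

		return MIN(chunk_sectors -
			   (unsigned int)(pos & (chunk_sectors - 1)),
			   queue_max);
	}

	int main(void)
	{
		printf("%u\n", max_sectors_at(512, 128, 0));	/* 128: chunk caps it */
		printf("%u\n", max_sectors_at(512, 128, 100));	/* 28: near a boundary */
		printf("%u\n", max_sectors_at(512, 0, 100));	/* 512: chunking off */
		return 0;
	}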
@@ -983,6 +1002,7 @@ extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,