Commit f70167a7 authored by John Garry, committed by Jens Axboe

block: Generalize chunk_sectors support as boundary support

The purpose of the chunk_sectors limit is to ensure that a mergeable request
fits within the boundary of the chunk_sectors value.

Such a feature will be useful for other request_queue boundary limits, so
generalize the chunk_sectors merge code.
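
As a rough illustration, here is a minimal standalone C sketch (userspace
code, not the kernel implementation; the helper names and the plain modulo
in the non-power-of-2 path are local to this example) of the boundary
arithmetic the merge code relies on: given an offset in sectors and a
boundary size, how many sectors remain before the next boundary is crossed.

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  static bool is_pow2(uint64_t n)
  {
          return n != 0 && (n & (n - 1)) == 0;
  }

  /* Sectors left before the next boundary, mirroring the kernel helper. */
  static unsigned int boundary_sectors_left(uint64_t offset,
                                            unsigned int boundary_sectors)
  {
          if (!is_pow2(boundary_sectors))
                  return boundary_sectors - (offset % boundary_sectors);
          /* Power-of-2 fast path: mask instead of dividing. */
          return boundary_sectors - (offset & (boundary_sectors - 1));
  }

  int main(void)
  {
          /* 128-sector boundary, I/O starting at sector 100: 28 left. */
          printf("%u\n", boundary_sectors_left(100, 128));
          return 0;
  }

A request that would need more sectors than this must be split (or a merge
refused) so that it does not straddle the boundary.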

This idea was proposed by Hannes Reinecke.
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: John Garry <john.g.garry@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Acked-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20240620125359.2684798-3-john.g.garry@oracle.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 8d1dfd51
@@ -154,6 +154,11 @@ static struct bio *bio_split_write_zeroes(struct bio *bio,
 	return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
 }
 
+static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim)
+{
+	return lim->chunk_sectors;
+}
+
 /*
  * Return the maximum number of sectors from the start of a bio that may be
  * submitted as a single request to a block device. If enough sectors remain,
@@ -167,12 +172,13 @@ static inline unsigned get_max_io_size(struct bio *bio,
 {
 	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
 	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
+	unsigned boundary_sectors = blk_boundary_sectors(lim);
 	unsigned max_sectors = lim->max_sectors, start, end;
 
-	if (lim->chunk_sectors) {
+	if (boundary_sectors) {
 		max_sectors = min(max_sectors,
-			blk_chunk_sectors_left(bio->bi_iter.bi_sector,
-					lim->chunk_sectors));
+			blk_boundary_sectors_left(bio->bi_iter.bi_sector,
+					boundary_sectors));
 	}
 
 	start = bio->bi_iter.bi_sector & (pbs - 1);
@@ -588,19 +594,21 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
 					  sector_t offset)
 {
 	struct request_queue *q = rq->q;
-	unsigned int max_sectors;
+	struct queue_limits *lim = &q->limits;
+	unsigned int max_sectors, boundary_sectors;
 
 	if (blk_rq_is_passthrough(rq))
 		return q->limits.max_hw_sectors;
 
+	boundary_sectors = blk_boundary_sectors(lim);
 	max_sectors = blk_queue_get_max_sectors(rq);
 
-	if (!q->limits.chunk_sectors ||
+	if (!boundary_sectors ||
 	    req_op(rq) == REQ_OP_DISCARD ||
 	    req_op(rq) == REQ_OP_SECURE_ERASE)
 		return max_sectors;
 
 	return min(max_sectors,
-		   blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
+		   blk_boundary_sectors_left(offset, boundary_sectors));
 }
 
 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
...
@@ -1188,7 +1188,7 @@ static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
 		return len;
 	return min_t(sector_t, len,
 		min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
-		    blk_chunk_sectors_left(target_offset, max_granularity)));
+		    blk_boundary_sectors_left(target_offset, max_granularity)));
 }
 
 static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
...
@@ -907,14 +907,15 @@ static inline bool bio_straddles_zones(struct bio *bio)
 }
 
 /*
- * Return how much of the chunk is left to be used for I/O at a given offset.
+ * Return how much within the boundary is left to be used for I/O at a given
+ * offset.
  */
-static inline unsigned int blk_chunk_sectors_left(sector_t offset,
-		unsigned int chunk_sectors)
+static inline unsigned int blk_boundary_sectors_left(sector_t offset,
+		unsigned int boundary_sectors)
 {
-	if (unlikely(!is_power_of_2(chunk_sectors)))
-		return chunk_sectors - sector_div(offset, chunk_sectors);
-	return chunk_sectors - (offset & (chunk_sectors - 1));
+	if (unlikely(!is_power_of_2(boundary_sectors)))
+		return boundary_sectors - sector_div(offset, boundary_sectors);
+	return boundary_sectors - (offset & (boundary_sectors - 1));
 }
 
 /**
...
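
To see how blk_boundary_sectors_left() feeds into the merge limit, here is a
standalone sketch (example-only names, not the kernel API) of the clamping
done in blk_rq_get_max_sectors(): the allowed size is the minimum of the
queue limit and the sectors left before the boundary.

  #include <stdint.h>
  #include <stdio.h>

  /*
   * Clamp an I/O of nr_sectors starting at start_sector so it does not
   * cross a boundary. Assumes boundary_sectors is a power of two, the
   * common case for real devices.
   */
  static unsigned int clamp_to_boundary(uint64_t start_sector,
                                        unsigned int nr_sectors,
                                        unsigned int boundary_sectors)
  {
          unsigned int left = boundary_sectors -
                          (start_sector & (boundary_sectors - 1));

          return nr_sectors < left ? nr_sectors : left;
  }

  int main(void)
  {
          /* 256-sector boundary, request at sector 200: clamped to 56. */
          printf("%u\n", clamp_to_boundary(200, 128, 256));
          return 0;
  }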