Commit c55ddd90 authored by Christoph Hellwig, committed by Jens Axboe

block: pass struct queue_limits to the bio splitting helpers

Allow using the splitting helpers on just a queue_limits instead of
a full request_queue structure.  This will eventually allow file systems
or remapping drivers to split REQ_OP_ZONE_APPEND bios based on limits
calculated as the minimum common capabilities over multiple devices.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20220727162300.3089193-7-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b6dc6198
...@@ -134,7 +134,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page, ...@@ -134,7 +134,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
iv = bip->bip_vec + bip->bip_vcnt; iv = bip->bip_vec + bip->bip_vcnt;
if (bip->bip_vcnt && if (bip->bip_vcnt &&
bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev), bvec_gap_to_prev(&bdev_get_queue(bio->bi_bdev)->limits,
&bip->bip_vec[bip->bip_vcnt - 1], offset)) &bip->bip_vec[bip->bip_vcnt - 1], offset))
return 0; return 0;
......
...@@ -965,7 +965,7 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio, ...@@ -965,7 +965,7 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
* would create a gap, disallow it. * would create a gap, disallow it.
*/ */
bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
if (bvec_gap_to_prev(q, bvec, offset)) if (bvec_gap_to_prev(&q->limits, bvec, offset))
return 0; return 0;
} }
......
This diff is collapsed.
...@@ -2816,8 +2816,8 @@ void blk_mq_submit_bio(struct bio *bio) ...@@ -2816,8 +2816,8 @@ void blk_mq_submit_bio(struct bio *bio)
blk_status_t ret; blk_status_t ret;
bio = blk_queue_bounce(bio, q); bio = blk_queue_bounce(bio, q);
if (bio_may_exceed_limits(bio, q)) if (bio_may_exceed_limits(bio, &q->limits))
bio = __bio_split_to_limits(bio, q, &nr_segs); bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
if (!bio_integrity_prep(bio)) if (!bio_integrity_prep(bio))
return; return;
......
...@@ -97,23 +97,23 @@ static inline bool biovec_phys_mergeable(struct request_queue *q, ...@@ -97,23 +97,23 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
return true; return true;
} }
static inline bool __bvec_gap_to_prev(struct request_queue *q, static inline bool __bvec_gap_to_prev(struct queue_limits *lim,
struct bio_vec *bprv, unsigned int offset) struct bio_vec *bprv, unsigned int offset)
{ {
return (offset & queue_virt_boundary(q)) || return (offset & lim->virt_boundary_mask) ||
((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q)); ((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
} }
/* /*
* Check if adding a bio_vec after bprv with offset would create a gap in * Check if adding a bio_vec after bprv with offset would create a gap in
* the SG list. Most drivers don't care about this, but some do. * the SG list. Most drivers don't care about this, but some do.
*/ */
static inline bool bvec_gap_to_prev(struct request_queue *q, static inline bool bvec_gap_to_prev(struct queue_limits *lim,
struct bio_vec *bprv, unsigned int offset) struct bio_vec *bprv, unsigned int offset)
{ {
if (!queue_virt_boundary(q)) if (!lim->virt_boundary_mask)
return false; return false;
return __bvec_gap_to_prev(q, bprv, offset); return __bvec_gap_to_prev(lim, bprv, offset);
} }
static inline bool rq_mergeable(struct request *rq) static inline bool rq_mergeable(struct request *rq)
...@@ -189,7 +189,8 @@ static inline bool integrity_req_gap_back_merge(struct request *req, ...@@ -189,7 +189,8 @@ static inline bool integrity_req_gap_back_merge(struct request *req,
struct bio_integrity_payload *bip = bio_integrity(req->bio); struct bio_integrity_payload *bip = bio_integrity(req->bio);
struct bio_integrity_payload *bip_next = bio_integrity(next); struct bio_integrity_payload *bip_next = bio_integrity(next);
return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], return bvec_gap_to_prev(&req->q->limits,
&bip->bip_vec[bip->bip_vcnt - 1],
bip_next->bip_vec[0].bv_offset); bip_next->bip_vec[0].bv_offset);
} }
...@@ -199,7 +200,8 @@ static inline bool integrity_req_gap_front_merge(struct request *req, ...@@ -199,7 +200,8 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
struct bio_integrity_payload *bip = bio_integrity(bio); struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_integrity_payload *bip_next = bio_integrity(req->bio); struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1], return bvec_gap_to_prev(&req->q->limits,
&bip->bip_vec[bip->bip_vcnt - 1],
bip_next->bip_vec[0].bv_offset); bip_next->bip_vec[0].bv_offset);
} }
...@@ -288,7 +290,8 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *); ...@@ -288,7 +290,8 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *, ssize_t part_timeout_store(struct device *, struct device_attribute *,
const char *, size_t); const char *, size_t);
static inline bool bio_may_exceed_limits(struct bio *bio, struct request_queue *q) static inline bool bio_may_exceed_limits(struct bio *bio,
struct queue_limits *lim)
{ {
switch (bio_op(bio)) { switch (bio_op(bio)) {
case REQ_OP_DISCARD: case REQ_OP_DISCARD:
...@@ -307,11 +310,11 @@ static inline bool bio_may_exceed_limits(struct bio *bio, struct request_queue * ...@@ -307,11 +310,11 @@ static inline bool bio_may_exceed_limits(struct bio *bio, struct request_queue *
* to the performance impact of cloned bios themselves the loop below * to the performance impact of cloned bios themselves the loop below
* doesn't matter anyway. * doesn't matter anyway.
*/ */
return q->limits.chunk_sectors || bio->bi_vcnt != 1 || return lim->chunk_sectors || bio->bi_vcnt != 1 ||
bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE; bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
} }
struct bio *__bio_split_to_limits(struct bio *bio, struct request_queue *q, struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
unsigned int *nr_segs); unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio, int ll_back_merge_fn(struct request *req, struct bio *bio,
unsigned int nr_segs); unsigned int nr_segs);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment