Commit 05b700ba authored by Ming Lei, committed by Jens Axboe

block: fix segment calculation for passthrough IO

blk_recount_segments() can be called from bio_add_pc_page() to calculate
how many segments this bio will have after one page is added to it. If
the resulting segment number exceeds the queue limit, the added page is
removed again.

This try-and-fix policy requires blk_recount_segments()
(__blk_recalc_rq_segments()) to ignore the segment number limit while
counting. Unfortunately bvec_split_segs() does check this limit, so a
too-small segment number is returned to bio_add_pc_page(), and the page
may still be added to the bio even though the segment number limit is
already exceeded.
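
Continuing the toy model above (still only an illustration, not the
kernel code): if the counting helper is clamped at the limit, the
"> MAX_SEGS" test in add_pc_page() can never fire, which is exactly how
the limit ends up being broken:

/* Hypothetical clamped counter: never reports more than MAX_SEGS. */
static unsigned segment_count_clamped(unsigned nr_pages)
{
	return nr_pages < MAX_SEGS ? nr_pages : MAX_SEGS;	/* capped */
}
/* With this counter, segment_count_clamped(n) > MAX_SEGS is always false,
 * so add_pc_page() keeps accepting pages past the segment limit. */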

Fix this issue by not considering the segment number limit when
calculating the bio's segment number.
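
A rough sketch of the idea behind the fix, in the same toy style
(split_segs() and its parameters are illustrative; only the UINT_MAX
trick mirrors the actual patch below):

/*
 * The splitting helper takes the cap as a parameter: the bio-split path
 * passes the queue limit, while the recount path passes UINT_MAX so the
 * true segment count is reported (assumes seg_size > 0).
 */
#include <limits.h>

static unsigned split_segs(unsigned len, unsigned seg_size, unsigned max_segs)
{
	unsigned nsegs = 0;

	while (len && nsegs < max_segs) {	/* stop at the caller's cap */
		len -= (seg_size < len) ? seg_size : len;
		nsegs++;
	}
	return nsegs;
}

/* recount path: effectively uncapped, reports the real segment count */
static unsigned recount_segs(unsigned len, unsigned seg_size)
{
	return split_segs(len, seg_size, UINT_MAX);
}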

Fixes: dcebd755 ("block: use bio_for_each_bvec() to compute multi-page bvec count")
Cc: Christoph Hellwig <hch@lst.de>
Cc: Omar Sandoval <osandov@fb.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e61750c8
@@ -180,7 +180,7 @@ static unsigned get_max_segment_size(struct request_queue *q,
  */
 static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
 		unsigned *nsegs, unsigned *last_seg_size,
-		unsigned *front_seg_size, unsigned *sectors)
+		unsigned *front_seg_size, unsigned *sectors, unsigned max_segs)
 {
 	unsigned len = bv->bv_len;
 	unsigned total_len = 0;
@@ -190,7 +190,7 @@ static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
 	 * Multi-page bvec may be too big to hold in one segment, so the
 	 * current bvec has to be splitted as multiple segments.
 	 */
-	while (len && new_nsegs + *nsegs < queue_max_segments(q)) {
+	while (len && new_nsegs + *nsegs < max_segs) {
 		seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
 		seg_size = min(seg_size, len);
@@ -240,6 +240,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	bool do_split = true;
 	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
+	const unsigned max_segs = queue_max_segments(q);
 	bio_for_each_bvec(bv, bio, iter) {
 		/*
@@ -254,14 +255,14 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 			 * Consider this a new segment if we're splitting in
 			 * the middle of this vector.
 			 */
-			if (nsegs < queue_max_segments(q) &&
+			if (nsegs < max_segs &&
 			    sectors < max_sectors) {
 				/* split in the middle of bvec */
 				bv.bv_len = (max_sectors - sectors) << 9;
 				bvec_split_segs(q, &bv, &nsegs,
 						&seg_size,
 						&front_seg_size,
-						&sectors);
+						&sectors, max_segs);
 			}
 			goto split;
 		}
@@ -283,7 +284,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 			continue;
 		}
 new_segment:
-		if (nsegs == queue_max_segments(q))
+		if (nsegs == max_segs)
 			goto split;
 		bvprv = bv;
@@ -296,7 +297,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 			if (nsegs == 1 && seg_size > front_seg_size)
 				front_seg_size = seg_size;
 		} else if (bvec_split_segs(q, &bv, &nsegs, &seg_size,
-				&front_seg_size, &sectors)) {
+				&front_seg_size, &sectors, max_segs)) {
 			goto split;
 		}
 	}
@@ -415,7 +416,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 			bvprv = bv;
 			prev = 1;
 			bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
-					&front_seg_size, NULL);
+					&front_seg_size, NULL, UINT_MAX);
 		}
 		bbio = bio;
 	}