Commit f6970f83 authored by Ming Lei, committed by Jens Axboe

block: don't check if adjacent bvecs in one bio can be mergeable

Now both passthrough and FS I/O support multi-page bvecs, and bvec
merging is already handled when adding a page to a bio, so adjacent
bvecs won't be mergeable any more if they belong to the same bio.

So only try to merge bvecs if they are from different bios.

Cc: Omar Sandoval <osandov@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 16e3e418
...@@ -354,11 +354,11 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, ...@@ -354,11 +354,11 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
struct bio *bio) struct bio *bio)
{ {
struct bio_vec bv, bvprv = { NULL }; struct bio_vec bv, bvprv = { NULL };
int prev = 0;
unsigned int seg_size, nr_phys_segs; unsigned int seg_size, nr_phys_segs;
unsigned front_seg_size; unsigned front_seg_size;
struct bio *fbio, *bbio; struct bio *fbio, *bbio;
struct bvec_iter iter; struct bvec_iter iter;
bool new_bio = false;
if (!bio) if (!bio)
return 0; return 0;
...@@ -379,7 +379,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, ...@@ -379,7 +379,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
nr_phys_segs = 0; nr_phys_segs = 0;
for_each_bio(bio) { for_each_bio(bio) {
bio_for_each_bvec(bv, bio, iter) { bio_for_each_bvec(bv, bio, iter) {
if (prev) { if (new_bio) {
if (seg_size + bv.bv_len if (seg_size + bv.bv_len
> queue_max_segment_size(q)) > queue_max_segment_size(q))
goto new_segment; goto new_segment;
...@@ -387,7 +387,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, ...@@ -387,7 +387,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
goto new_segment; goto new_segment;
seg_size += bv.bv_len; seg_size += bv.bv_len;
bvprv = bv;
if (nr_phys_segs == 1 && seg_size > if (nr_phys_segs == 1 && seg_size >
front_seg_size) front_seg_size)
...@@ -396,12 +395,13 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, ...@@ -396,12 +395,13 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
continue; continue;
} }
new_segment: new_segment:
bvprv = bv;
prev = 1;
bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size, bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
&front_seg_size, NULL, UINT_MAX); &front_seg_size, NULL, UINT_MAX);
new_bio = false;
} }
bbio = bio; bbio = bio;
bvprv = bv;
new_bio = true;
} }
fbio->bi_seg_front_size = front_seg_size; fbio->bi_seg_front_size = front_seg_size;
...@@ -501,29 +501,26 @@ static inline int __blk_bvec_map_sg(struct bio_vec bv, ...@@ -501,29 +501,26 @@ static inline int __blk_bvec_map_sg(struct bio_vec bv,
return 1; return 1;
} }
static inline void /* only try to merge bvecs into one sg if they are from two bios */
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, static inline bool
struct scatterlist *sglist, struct bio_vec *bvprv, __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
struct scatterlist **sg, int *nsegs) struct bio_vec *bvprv, struct scatterlist **sg)
{ {
int nbytes = bvec->bv_len; int nbytes = bvec->bv_len;
if (*sg) { if (!*sg)
return false;
if ((*sg)->length + nbytes > queue_max_segment_size(q)) if ((*sg)->length + nbytes > queue_max_segment_size(q))
goto new_segment; return false;
if (!biovec_phys_mergeable(q, bvprv, bvec)) if (!biovec_phys_mergeable(q, bvprv, bvec))
goto new_segment; return false;
(*sg)->length += nbytes; (*sg)->length += nbytes;
} else {
new_segment: return true;
if (bvec->bv_offset + bvec->bv_len <= PAGE_SIZE) {
(*nsegs) += __blk_bvec_map_sg(*bvec, sglist, sg);
} else
(*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
}
*bvprv = *bvec;
} }
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
...@@ -533,11 +530,29 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, ...@@ -533,11 +530,29 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
struct bio_vec bvec, bvprv = { NULL }; struct bio_vec bvec, bvprv = { NULL };
struct bvec_iter iter; struct bvec_iter iter;
int nsegs = 0; int nsegs = 0;
bool new_bio = false;
for_each_bio(bio) for_each_bio(bio) {
bio_for_each_bvec(bvec, bio, iter) bio_for_each_bvec(bvec, bio, iter) {
__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg, /*
&nsegs); * Only try to merge bvecs from two bios given we
* have done bio internal merge when adding pages
* to bio
*/
if (new_bio &&
__blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
goto next_bvec;
if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
else
nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
next_bvec:
new_bio = false;
}
bvprv = bvec;
new_bio = true;
}
return nsegs; return nsegs;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment