Commit 48d7727c authored by Ming Lei, committed by Jens Axboe

block: optimize __blk_segment_map_sg() for single-page bvec

Introduce a fast path for single-page bvec IO so that blk_bvec_map_sg()
can be avoided.
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4d633062
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -447,7 +447,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
 }
 
-static struct scatterlist *blk_next_sg(struct scatterlist **sg,
+static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
 		struct scatterlist *sglist)
 {
 	if (!*sg)
@@ -512,7 +512,12 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 		(*sg)->length += nbytes;
 	} else {
 new_segment:
-		(*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
+		if (bvec->bv_offset + bvec->bv_len <= PAGE_SIZE) {
+			*sg = blk_next_sg(sg, sglist);
+			sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
+			(*nsegs) += 1;
+		} else
+			(*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
 	}
 
 	*bvprv = *bvec;
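For readers outside the kernel tree, the condition the new branch tests can be illustrated with a minimal standalone C sketch (not from the commit; the struct and the 4 KiB PAGE_SIZE below are simplified stand-ins for the kernel definitions). A bvec whose payload does not cross a page boundary maps to exactly one scatterlist entry, so a single sg_set_page() suffices and the general multi-page loop in blk_bvec_map_sg() can be skipped:

/* Illustrative sketch only, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u /* assumed 4 KiB pages for this example */

struct bvec_sketch {
	unsigned int bv_len;    /* payload length in bytes */
	unsigned int bv_offset; /* payload offset within its first page */
};

/*
 * True when bv_offset + bv_len stays within one page, i.e. the payload
 * never crosses a page boundary and needs only one scatterlist entry.
 */
static bool bvec_is_single_page(const struct bvec_sketch *bv)
{
	return bv->bv_offset + bv->bv_len <= PAGE_SIZE;
}

int main(void)
{
	struct bvec_sketch a = { .bv_len = 512,  .bv_offset = 0 };   /* fits in one page */
	struct bvec_sketch b = { .bv_len = 4096, .bv_offset = 512 }; /* crosses a page boundary */

	printf("a single-page: %d\n", bvec_is_single_page(&a)); /* prints 1 */
	printf("b single-page: %d\n", bvec_is_single_page(&b)); /* prints 0 */
	return 0;
}

In the hunk above, the single-page case sets up the scatterlist entry directly via blk_next_sg() and sg_set_page(), while multi-page bvecs still fall back to blk_bvec_map_sg().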