Commit 862e5a5e authored by Ming Lei, committed by Jens Axboe

block: use bio_for_each_bvec() to map sg

It is more efficient to use bio_for_each_bvec() to map sg. Meanwhile, we
have to handle splitting of multi-page bvecs, as is done in blk_bio_segment_split().
Reviewed-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent dcebd755
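
The splitting in blk_bvec_map_sg() below is plain arithmetic, so it can be illustrated outside the kernel. What follows is a minimal userspace sketch, not part of the commit: PAGE_SIZE is hard-coded to 4096 and max_seg_size() is a hypothetical stand-in for get_max_segment_size(), assuming a flat 8 KiB hardware segment limit. It walks one multi-page bvec and prints the sg entries the loop would produce.

/* Userspace sketch of the bvec-splitting arithmetic (assumptions noted above). */
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Hypothetical stand-in for get_max_segment_size(): cap each segment
 * at an assumed 8 KiB hardware limit, counted from the current offset. */
static unsigned max_seg_size(unsigned offset)
{
	const unsigned max_seg = 8192u;

	return max_seg - (offset % max_seg);
}

int main(void)
{
	/* One multi-page bvec: three pages of data starting at offset 512. */
	unsigned bv_offset = 512, bv_len = 3 * PAGE_SIZE;
	unsigned nbytes = bv_len, total = 0, nsegs = 0;

	while (nbytes > 0) {
		unsigned seg_size = max_seg_size(bv_offset + total);

		if (seg_size > nbytes)
			seg_size = nbytes;

		/* Same page/offset math as blk_bvec_map_sg() below. */
		unsigned offset = (total + bv_offset) % PAGE_SIZE;
		unsigned idx = (total + bv_offset) / PAGE_SIZE;

		printf("sg[%u]: page %u, offset %u, len %u\n",
		       nsegs, idx, offset, seg_size);

		total += seg_size;
		nbytes -= seg_size;
		nsegs++;
	}

	return 0;
}

With these numbers the bvec maps to two sg entries: sg[0] covers 7680 bytes from page 0 at offset 512, and sg[1] covers the remaining 4608 bytes starting at page 2. That second entry spans a page boundary, which is the kind of multi-page segment the old single-page iteration had to emit page by page.
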
@@ -464,6 +464,54 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
 }
 
+static struct scatterlist *blk_next_sg(struct scatterlist **sg,
+		struct scatterlist *sglist)
+{
+	if (!*sg)
+		return sglist;
+
+	/*
+	 * If the driver previously mapped a shorter list, we could see a
+	 * termination bit prematurely unless it fully inits the sg table
+	 * on each mapping. We KNOW that there must be more entries here
+	 * or the driver would be buggy, so force clear the termination bit
+	 * to avoid doing a full sg_init_table() in drivers for each command.
+	 */
+	sg_unmark_end(*sg);
+	return sg_next(*sg);
+}
+
+static unsigned blk_bvec_map_sg(struct request_queue *q,
+		struct bio_vec *bvec, struct scatterlist *sglist,
+		struct scatterlist **sg)
+{
+	unsigned nbytes = bvec->bv_len;
+	unsigned nsegs = 0, total = 0, offset = 0;
+
+	while (nbytes > 0) {
+		unsigned seg_size;
+		struct page *pg;
+		unsigned idx;
+
+		*sg = blk_next_sg(sg, sglist);
+
+		seg_size = get_max_segment_size(q, bvec->bv_offset + total);
+		seg_size = min(nbytes, seg_size);
+
+		offset = (total + bvec->bv_offset) % PAGE_SIZE;
+		idx = (total + bvec->bv_offset) / PAGE_SIZE;
+		pg = nth_page(bvec->bv_page, idx);
+
+		sg_set_page(*sg, pg, seg_size, offset);
+
+		total += seg_size;
+		nbytes -= seg_size;
+		nsegs++;
+	}
+
+	return nsegs;
+}
+
 static inline void
 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 		struct scatterlist *sglist, struct bio_vec *bvprv,
@@ -481,25 +529,7 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 		(*sg)->length += nbytes;
 	} else {
 new_segment:
-		if (!*sg)
-			*sg = sglist;
-		else {
-			/*
-			 * If the driver previously mapped a shorter
-			 * list, we could see a termination bit
-			 * prematurely unless it fully inits the sg
-			 * table on each mapping. We KNOW that there
-			 * must be more entries here or the driver
-			 * would be buggy, so force clear the
-			 * termination bit to avoid doing a full
-			 * sg_init_table() in drivers for each command.
-			 */
-			sg_unmark_end(*sg);
-			*sg = sg_next(*sg);
-		}
-
-		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
-		(*nsegs)++;
+		(*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
 	}
 
 	*bvprv = *bvec;
 }
@@ -521,7 +551,7 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 	int nsegs = 0;
 
 	for_each_bio(bio)
-		bio_for_each_segment(bvec, bio, iter)
+		bio_for_each_bvec(bvec, bio, iter)
 			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
 					     &nsegs);