Commit 963ab9e5 authored by Asias He's avatar Asias He Committed by Jens Axboe

block: Introduce __blk_segment_map_sg() helper

Split the mapping code in blk_rq_map_sg() to a helper
__blk_segment_map_sg(), so that other mapping functions, e.g.
blk_bio_map_sg(), can share the code.

Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Tejun Heo <tj@kernel.org>
Cc: Shaohua Li <shli@kernel.org>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: virtualization@lists.linux-foundation.org
Suggested-by: Jens Axboe <axboe@kernel.dk>
Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 53362a05
...@@ -110,43 +110,28 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, ...@@ -110,43 +110,28 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
return 0; return 0;
} }
/* static void
* map a request to scatterlist, return number of sg entries setup. Caller __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
* must make sure sg can hold rq->nr_phys_segments entries struct scatterlist *sglist, struct bio_vec **bvprv,
*/ struct scatterlist **sg, int *nsegs, int *cluster)
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
struct scatterlist *sglist)
{ {
struct bio_vec *bvec, *bvprv;
struct req_iterator iter;
struct scatterlist *sg;
int nsegs, cluster;
nsegs = 0;
cluster = blk_queue_cluster(q);
/*
* for each bio in rq
*/
bvprv = NULL;
sg = NULL;
rq_for_each_segment(bvec, rq, iter) {
int nbytes = bvec->bv_len; int nbytes = bvec->bv_len;
if (bvprv && cluster) { if (*bvprv && *cluster) {
if (sg->length + nbytes > queue_max_segment_size(q)) if ((*sg)->length + nbytes > queue_max_segment_size(q))
goto new_segment; goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
goto new_segment; goto new_segment;
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
goto new_segment; goto new_segment;
sg->length += nbytes; (*sg)->length += nbytes;
} else { } else {
new_segment: new_segment:
if (!sg) if (!*sg)
sg = sglist; *sg = sglist;
else { else {
/* /*
* If the driver previously mapped a shorter * If the driver previously mapped a shorter
...@@ -158,14 +143,39 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, ...@@ -158,14 +143,39 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
* termination bit to avoid doing a full * termination bit to avoid doing a full
* sg_init_table() in drivers for each command. * sg_init_table() in drivers for each command.
*/ */
sg->page_link &= ~0x02; (*sg)->page_link &= ~0x02;
sg = sg_next(sg); *sg = sg_next(*sg);
} }
sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset); sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
nsegs++; (*nsegs)++;
} }
bvprv = bvec; *bvprv = bvec;
}
/*
* map a request to scatterlist, return number of sg entries setup. Caller
* must make sure sg can hold rq->nr_phys_segments entries
*/
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
struct scatterlist *sglist)
{
struct bio_vec *bvec, *bvprv;
struct req_iterator iter;
struct scatterlist *sg;
int nsegs, cluster;
nsegs = 0;
cluster = blk_queue_cluster(q);
/*
* for each bio in rq
*/
bvprv = NULL;
sg = NULL;
rq_for_each_segment(bvec, rq, iter) {
__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
&nsegs, &cluster);
} /* segments in rq */ } /* segments in rq */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment