Commit 76830840 authored by Matthew Wilcox

NVMe: Handle physical merging of bvec entries

In order to not overrun the sg array, we have to merge physically
contiguous pages into a single sg entry.
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
parent 1974b1ae
...@@ -392,19 +392,25 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev, ...@@ -392,19 +392,25 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
static int nvme_map_bio(struct device *dev, struct nvme_bio *nbio, static int nvme_map_bio(struct device *dev, struct nvme_bio *nbio,
struct bio *bio, enum dma_data_direction dma_dir, int psegs) struct bio *bio, enum dma_data_direction dma_dir, int psegs)
{ {
struct bio_vec *bvec; struct bio_vec *bvec, *bvprv = NULL;
struct scatterlist *sg = nbio->sg; struct scatterlist *sg = NULL;
int i, nsegs; int i, nsegs = 0;
sg_init_table(sg, psegs); sg_init_table(nbio->sg, psegs);
bio_for_each_segment(bvec, bio, i) { bio_for_each_segment(bvec, bio, i) {
sg_set_page(sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset); if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
sg++; sg->length += bvec->bv_len;
/* XXX: handle non-mergable here */ } else {
nsegs++; /* Check bvprv && offset == 0 */
sg = sg ? sg + 1 : nbio->sg;
sg_set_page(sg, bvec->bv_page, bvec->bv_len,
bvec->bv_offset);
nsegs++;
}
bvprv = bvec;
} }
nbio->nents = nsegs; nbio->nents = nsegs;
sg_mark_end(sg);
return dma_map_sg(dev, nbio->sg, nbio->nents, dma_dir); return dma_map_sg(dev, nbio->sg, nbio->nents, dma_dir);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment