Commit 783b94bd authored by Christoph Hellwig

nvme-pci: do not build a scatterlist to map metadata

We always have exactly one segment, so we can simply call dma_map_bvec.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
parent b15c592d
@@ -221,7 +221,7 @@ struct nvme_iod {
 	int npages;		/* In the PRP list. 0 means small pool in use */
 	int nents;		/* Used in scatterlist */
 	dma_addr_t first_dma;
-	struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
+	dma_addr_t meta_dma;
 	struct scatterlist *sg;
 	struct scatterlist inline_sg[0];
 };
@@ -592,13 +592,16 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 	dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
 	int i;
 
+	if (blk_integrity_rq(req)) {
+		dma_unmap_page(dev->dev, iod->meta_dma,
+				rq_integrity_vec(req)->bv_len, dma_dir);
+	}
+
 	if (iod->nents) {
 		/* P2PDMA requests do not need to be unmapped */
 		if (!is_pci_p2pdma_page(sg_page(iod->sg)))
 			dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
-
-		if (blk_integrity_rq(req))
-			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
 	}
 
 	if (iod->npages == 0)
@@ -861,17 +864,11 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 
 	ret = BLK_STS_IOERR;
 	if (blk_integrity_rq(req)) {
-		if (blk_rq_count_integrity_sg(q, req->bio) != 1)
-			goto out;
-
-		sg_init_table(&iod->meta_sg, 1);
-		if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
-			goto out;
-
-		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
+		iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
+					     dma_dir, 0);
+		if (dma_mapping_error(dev->dev, iod->meta_dma))
 			goto out;
-
-		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
+		cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
 	}
 
 	return BLK_STS_OK;
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment