Commit 1c2ad9fa authored by Matthew Wilcox

NVMe: Simplify nvme_unmap_user_pages

By using the iod->nents field (the same way other I/O paths do), we can
avoid recalculating the number of sg entries at unmap time, and make
nvme_unmap_user_pages() easier to call.

Also, use the 'write' parameter instead of assuming DMA_FROM_DEVICE.

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
parent fe304c43
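
For context, the simplified helper after this change reads roughly as follows. This is a consolidated view reconstructed from the '+' lines in the diff below, not a separate source file: iod->nents is recorded in nvme_map_user_pages() and the write flag selects the DMA direction.

	/* Consolidated view of the helper as it stands after this commit,
	 * reconstructed from the diff below.
	 */
	static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
				struct nvme_iod *iod)
	{
		int i;

		/* iod->nents was recorded by nvme_map_user_pages(), so there
		 * is no need to recompute the sg entry count from addr and
		 * length here.
		 */
		dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
					write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		for (i = 0; i < iod->nents; i++)
			put_page(sg_page(&iod->sg[i]));
	}
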
@@ -1046,6 +1046,7 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 		offset = 0;
 	}
 	sg_mark_end(&sg[i - 1]);
+	iod->nents = count;
 
 	err = -ENOMEM;
 	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
@@ -1066,16 +1067,15 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 }
 
 static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
-			unsigned long addr, int length, struct nvme_iod *iod)
+			struct nvme_iod *iod)
 {
-	struct scatterlist *sg = iod->sg;
-	int i, count;
+	int i;
 
-	count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
-	dma_unmap_sg(&dev->pci_dev->dev, sg, count, DMA_FROM_DEVICE);
+	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
-	for (i = 0; i < count; i++)
-		put_page(sg_page(&sg[i]));
+	for (i = 0; i < iod->nents; i++)
+		put_page(sg_page(&iod->sg[i]));
 }
 
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
@@ -1132,7 +1132,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	else
 		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
 
-	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, iod);
+	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
 	nvme_free_iod(dev, iod);
 	return status;
 }
@@ -1180,8 +1180,7 @@ static int nvme_user_admin_cmd(struct nvme_ns *ns,
 	status = nvme_submit_admin_cmd(dev, &c, NULL);
 
 	if (cmd.data_len) {
-		nvme_unmap_user_pages(dev, cmd.opcode & 1, cmd.addr,
-							cmd.data_len, iod);
+		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
 		nvme_free_iod(dev, iod);
 	}
 	return status;