Commit 1e97af7f authored by Logan Gunthorpe, committed by Christoph Hellwig

RDMA/rw: drop pci_p2pdma_[un]map_sg()

dma_map_sg() now supports P2PDMA pages directly, so pci_p2pdma_map_sg()
is no longer necessary and may be dropped. That also makes the
rdma_rw_[un]map_sg() helpers redundant. Remove them all.
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 495758bb
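
For context (not part of the commit): because the core DMA mapping code now handles P2PDMA pages itself, a caller no longer needs to branch on is_pci_p2pdma_page(); one ib_dma_map_sgtable_attrs()/ib_dma_unmap_sgtable_attrs() pair covers both ordinary host memory and peer-to-peer memory. A minimal sketch of that simplified pattern follows; the example_map_and_post() wrapper and its body are illustrative only, not code from the tree.

/* Hypothetical helper: one mapping path for host and P2PDMA-backed SGLs. */
#include <rdma/ib_verbs.h>

static int example_map_and_post(struct ib_device *dev, struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        int ret;

        /* No is_pci_p2pdma_page() special case is needed any more. */
        ret = ib_dma_map_sgtable_attrs(dev, sgt, dir, 0);
        if (ret)
                return ret;

        /* ... build and post work requests over sgt->nents mapped entries ... */

        ib_dma_unmap_sgtable_attrs(dev, sgt, dir, 0);
        return 0;
}
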
@@ -274,33 +274,6 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
         return 1;
 }
 
-static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
-                             u32 sg_cnt, enum dma_data_direction dir)
-{
-        if (is_pci_p2pdma_page(sg_page(sg)))
-                pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
-        else
-                ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
-}
-
-static int rdma_rw_map_sgtable(struct ib_device *dev, struct sg_table *sgt,
-                               enum dma_data_direction dir)
-{
-        int nents;
-
-        if (is_pci_p2pdma_page(sg_page(sgt->sgl))) {
-                if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
-                        return 0;
-                nents = pci_p2pdma_map_sg(dev->dma_device, sgt->sgl,
-                                          sgt->orig_nents, dir);
-                if (!nents)
-                        return -EIO;
-                sgt->nents = nents;
-                return 0;
-        }
-        return ib_dma_map_sgtable_attrs(dev, sgt, dir, 0);
-}
-
 /**
  * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
  * @ctx:        context to initialize
@@ -327,7 +300,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
         };
         int ret;
 
-        ret = rdma_rw_map_sgtable(dev, &sgt, dir);
+        ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
         if (ret)
                 return ret;
         sg_cnt = sgt.nents;
@@ -366,7 +339,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
         return ret;
 
 out_unmap_sg:
-        rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
+        ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
         return ret;
 }
 EXPORT_SYMBOL(rdma_rw_ctx_init);
@@ -414,12 +387,12 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                 return -EINVAL;
         }
 
-        ret = rdma_rw_map_sgtable(dev, &sgt, dir);
+        ret = ib_dma_map_sgtable_attrs(dev, &sgt, dir, 0);
         if (ret)
                 return ret;
 
         if (prot_sg_cnt) {
-                ret = rdma_rw_map_sgtable(dev, &prot_sgt, dir);
+                ret = ib_dma_map_sgtable_attrs(dev, &prot_sgt, dir, 0);
                 if (ret)
                         goto out_unmap_sg;
         }
@@ -486,9 +459,9 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
         kfree(ctx->reg);
 out_unmap_prot_sg:
         if (prot_sgt.nents)
-                rdma_rw_unmap_sg(dev, prot_sgt.sgl, prot_sgt.orig_nents, dir);
+                ib_dma_unmap_sgtable_attrs(dev, &prot_sgt, dir, 0);
 out_unmap_sg:
-        rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
+        ib_dma_unmap_sgtable_attrs(dev, &sgt, dir, 0);
         return ret;
 }
 EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
@@ -621,7 +594,7 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                 break;
         }
 
-        rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+        ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
 }
 EXPORT_SYMBOL(rdma_rw_ctx_destroy);
 
@@ -649,8 +622,8 @@ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
         kfree(ctx->reg);
 
         if (prot_sg_cnt)
-                rdma_rw_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
-        rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+                ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+        ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
 }
 EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);