Commit 0e5eca79 authored by Vipul Pandya, committed by David S. Miller

RDMA/cxgb4: Map pbl buffers for dma if using DSGL.

Signed-off-by: Vipul Pandya <vipul@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 42b6a949
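
The patch below replaces the earlier virt_to_phys() lookup with a proper streaming DMA mapping of the PBL buffer and, if the mapping fails, falls back to the inline write path. As a minimal sketch of that dma_map_single()/dma_unmap_single() pattern in isolation (the write_buf_via_dma() function and its issue_write() helper are hypothetical placeholders, not part of the patch):

    #include <linux/dma-mapping.h>
    #include <linux/device.h>
    #include <linux/errno.h>

    /* Hypothetical helper: hands a bus address to the hardware. */
    static int issue_write(struct device *dev, dma_addr_t addr, size_t len);

    static int write_buf_via_dma(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t daddr;
            int ret;

            /* Map the CPU buffer for a CPU-to-device (DMA_TO_DEVICE) transfer. */
            daddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, daddr))
                    return -ENOMEM; /* caller may fall back to an inline write */

            /* Give the device the mapped bus address, not virt_to_phys(buf). */
            ret = issue_write(dev, daddr, len);

            dma_unmap_single(dev, daddr, len, DMA_TO_DEVICE);
            return ret;
    }
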
@@ -51,7 +51,7 @@ module_param(inline_threshold, int, 0644);
 MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
 
 static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
-                                       u32 len, void *data, int wait)
+                                       u32 len, dma_addr_t data, int wait)
 {
         struct sk_buff *skb;
         struct ulp_mem_io *req;
@@ -88,7 +88,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
         sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
                                     ULPTX_NSGE(1));
         sgl->len0 = cpu_to_be32(len);
-        sgl->addr0 = cpu_to_be64(virt_to_phys(data));
+        sgl->addr0 = cpu_to_be64(data);
 
         ret = c4iw_ofld_send(rdev, skb);
         if (ret)
@@ -178,6 +178,13 @@ int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
         u32 remain = len;
         u32 dmalen;
         int ret = 0;
+        dma_addr_t daddr;
+        dma_addr_t save;
+
+        daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
+        if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
+                return -1;
+        save = daddr;
 
         while (remain > inline_threshold) {
                 if (remain < T4_ULPTX_MAX_DMA) {
@@ -188,16 +195,18 @@ int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
                 } else
                         dmalen = T4_ULPTX_MAX_DMA;
                 remain -= dmalen;
-                ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, data,
+                ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
                                                   !remain);
                 if (ret)
                         goto out;
                 addr += dmalen >> 5;
                 data += dmalen;
+                daddr += dmalen;
         }
         if (remain)
                 ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
 out:
+        dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
         return ret;
 }
 
@@ -209,9 +218,17 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
                              void *data)
 {
         if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
-                if (len > inline_threshold)
-                        return _c4iw_write_mem_dma(rdev, addr, len, data);
-                else
+                if (len > inline_threshold) {
+                        if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
+                                printk_ratelimited(KERN_WARNING
+                                                   "%s: dma map"
+                                                   " failure (non fatal)\n",
+                                                   pci_name(rdev->lldi.pdev));
+                                return _c4iw_write_mem_inline(rdev, addr, len,
+                                                              data);
+                        } else
+                                return 0;
+                } else
                         return _c4iw_write_mem_inline(rdev, addr, len, data);
         } else
                 return _c4iw_write_mem_inline(rdev, addr, len, data);