Commit 17882fd4 authored by Robert Hancock, committed by David S. Miller

net: axienet: Clean up device used for DMA calls

Instead of using ndev->dev.parent to find the correct device to use for
DMA API calls, just use the dev pointer stored in the driver's private
structure (lp->dev).
Signed-off-by: Robert Hancock <robert.hancock@calian.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7a7d340b
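
For context, the change relies on a common kernel driver pattern: the probe function captures the struct device of the underlying platform device once in the driver's private data, and every later DMA API call reuses that pointer instead of reaching through ndev->dev.parent. The sketch below illustrates that pattern under stated assumptions; the field names mirror the axienet driver, but the probe function, descriptor struct, and ring size here are simplified placeholders, not the literal driver code.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>

/* Placeholder descriptor type for the sketch (the real driver defines
 * struct axidma_bd in its own header). */
struct example_bd {
        u32 cntrl;
};

/* Minimal stand-in for the driver-private structure: it keeps the
 * struct device pointer used for all DMA API calls. */
struct example_local {
        struct net_device *ndev;
        struct device *dev;             /* device used for DMA API calls */
        struct example_bd *tx_bd_v;     /* Tx descriptor ring (CPU view) */
        dma_addr_t tx_bd_p;             /* Tx descriptor ring (DMA address) */
        u32 tx_bd_num;
};

static int example_probe(struct platform_device *pdev)
{
        struct net_device *ndev;
        struct example_local *lp;

        ndev = alloc_etherdev(sizeof(*lp));
        if (!ndev)
                return -ENOMEM;

        lp = netdev_priv(ndev);
        lp->ndev = ndev;
        lp->dev = &pdev->dev;   /* captured once, reused for DMA calls */
        lp->tx_bd_num = 64;     /* arbitrary ring size for the sketch */

        /* Allocation goes through lp->dev rather than ndev->dev.parent. */
        lp->tx_bd_v = dma_alloc_coherent(lp->dev,
                                         sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
                                         &lp->tx_bd_p, GFP_KERNEL);
        if (!lp->tx_bd_v) {
                free_netdev(ndev);
                return -ENOMEM;
        }
        return 0;
}

Both expressions resolve to the same platform device here, so the patch below is a cleanup rather than a behavioural change; it simply makes the DMA call sites shorter and consistent.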
@@ -190,7 +190,7 @@ static void axienet_dma_bd_release(struct net_device *ndev)
 	struct axienet_local *lp = netdev_priv(ndev);
 	/* If we end up here, tx_bd_v must have been DMA allocated. */
-	dma_free_coherent(ndev->dev.parent,
+	dma_free_coherent(lp->dev,
 			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 			  lp->tx_bd_v,
 			  lp->tx_bd_p);
@@ -215,12 +215,12 @@ static void axienet_dma_bd_release(struct net_device *ndev)
 		 */
 		if (lp->rx_bd_v[i].cntrl) {
 			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
-			dma_unmap_single(ndev->dev.parent, phys,
+			dma_unmap_single(lp->dev, phys,
 					 lp->max_frm_size, DMA_FROM_DEVICE);
 		}
 	}
-	dma_free_coherent(ndev->dev.parent,
+	dma_free_coherent(lp->dev,
 			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 			  lp->rx_bd_v,
 			  lp->rx_bd_p);
@@ -249,13 +249,13 @@ static int axienet_dma_bd_init(struct net_device *ndev)
 	lp->rx_bd_ci = 0;
 	/* Allocate the Tx and Rx buffer descriptors. */
-	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
 					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
 					 &lp->tx_bd_p, GFP_KERNEL);
 	if (!lp->tx_bd_v)
 		return -ENOMEM;
-	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
 					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
 					 &lp->rx_bd_p, GFP_KERNEL);
 	if (!lp->rx_bd_v)
@@ -285,9 +285,9 @@ static int axienet_dma_bd_init(struct net_device *ndev)
 			goto out;
 		lp->rx_bd_v[i].skb = skb;
-		addr = dma_map_single(ndev->dev.parent, skb->data,
+		addr = dma_map_single(lp->dev, skb->data,
 				      lp->max_frm_size, DMA_FROM_DEVICE);
-		if (dma_mapping_error(ndev->dev.parent, addr)) {
+		if (dma_mapping_error(lp->dev, addr)) {
 			netdev_err(ndev, "DMA mapping error\n");
 			goto out;
 		}
@@ -636,7 +636,7 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 		/* Ensure we see complete descriptor update */
 		dma_rmb();
 		phys = desc_get_phys_addr(lp, cur_p);
-		dma_unmap_single(ndev->dev.parent, phys,
+		dma_unmap_single(lp->dev, phys,
 				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
 				 DMA_TO_DEVICE);
@@ -774,9 +774,9 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
 	}
-	phys = dma_map_single(ndev->dev.parent, skb->data,
+	phys = dma_map_single(lp->dev, skb->data,
 			      skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+	if (unlikely(dma_mapping_error(lp->dev, phys))) {
 		if (net_ratelimit())
 			netdev_err(ndev, "TX DMA mapping error\n");
 		ndev->stats.tx_dropped++;
@@ -790,11 +790,11 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			lp->tx_bd_tail = 0;
 		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 		frag = &skb_shinfo(skb)->frags[ii];
-		phys = dma_map_single(ndev->dev.parent,
+		phys = dma_map_single(lp->dev,
 				      skb_frag_address(frag),
 				      skb_frag_size(frag),
 				      DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+		if (unlikely(dma_mapping_error(lp->dev, phys))) {
 			if (net_ratelimit())
 				netdev_err(ndev, "TX DMA mapping error\n");
 			ndev->stats.tx_dropped++;
@@ -872,7 +872,7 @@ static void axienet_recv(struct net_device *ndev)
 			length = cur_p->app4 & 0x0000FFFF;
 			phys = desc_get_phys_addr(lp, cur_p);
-			dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
+			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
 					 DMA_FROM_DEVICE);
 			skb_put(skb, length);
@@ -905,10 +905,10 @@ static void axienet_recv(struct net_device *ndev)
 		if (!new_skb)
 			break;
-		phys = dma_map_single(ndev->dev.parent, new_skb->data,
+		phys = dma_map_single(lp->dev, new_skb->data,
 				      lp->max_frm_size,
 				      DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+		if (unlikely(dma_mapping_error(lp->dev, phys))) {
 			if (net_ratelimit())
 				netdev_err(ndev, "RX DMA mapping error\n");
 			dev_kfree_skb(new_skb);
@@ -1712,7 +1712,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
 		if (cur_p->cntrl) {
 			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
-			dma_unmap_single(ndev->dev.parent, addr,
+			dma_unmap_single(lp->dev, addr,
 					 (cur_p->cntrl &
 					  XAXIDMA_BD_CTRL_LENGTH_MASK),
 					 DMA_TO_DEVICE);