Commit 8e03a5e7 authored by Sergei Shtylyov, committed by David S. Miller

sh_eth: merge sh_eth_free_dma_buffer() into sh_eth_ring_free()

While the ring allocation is done by a single function, sh_eth_ring_init(),
the ring deallocation was split into two functions (almost always called
one after the other) for no good reason. Merge sh_eth_free_dma_buffer()
into sh_eth_ring_free(), which saves space not only at the direct call
sites of the former function but also in sh_eth_ring_init()'s simplified
error path...
Signed-off-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 91d80683
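
For orientation, this is roughly how sh_eth_ring_free() reads once the patch below is applied. It is a sketch reconstructed from the hunks, not a verbatim copy of the driver source; the pre-existing skb ring freeing loops are abbreviated to a comment.

static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ringsize, i;

	/* Free the Rx and Tx skb ring buffers, as before (loops omitted
	 * here; the 'i' counter is used by those loops).
	 */

	/* Descriptor ring freeing, formerly done by the separate
	 * sh_eth_free_dma_buffer().
	 */
	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}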
@@ -1098,7 +1098,7 @@ static struct mdiobb_ops bb_ops = {
 static void sh_eth_ring_free(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
-	int i;
+	int ringsize, i;
 
 	/* Free Rx skb ringbuffer */
 	if (mdp->rx_skbuff) {
@@ -1115,6 +1115,20 @@ static void sh_eth_ring_free(struct net_device *ndev)
 	}
 	kfree(mdp->tx_skbuff);
 	mdp->tx_skbuff = NULL;
+
+	if (mdp->rx_ring) {
+		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+				  mdp->rx_desc_dma);
+		mdp->rx_ring = NULL;
+	}
+
+	if (mdp->tx_ring) {
+		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
+		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
+				  mdp->tx_desc_dma);
+		mdp->tx_ring = NULL;
+	}
 }
 
 /* format skb and descriptor buffer */
@@ -1220,14 +1234,14 @@ static int sh_eth_ring_init(struct net_device *ndev)
 	mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
 				 GFP_KERNEL);
 	if (!mdp->tx_skbuff)
-		goto skb_ring_free;
+		goto ring_free;
 
 	/* Allocate all Rx descriptors. */
 	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
 	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
 					  GFP_KERNEL);
 	if (!mdp->rx_ring)
-		goto skb_ring_free;
+		goto ring_free;
 
 	mdp->dirty_rx = 0;
@@ -1236,41 +1250,16 @@ static int sh_eth_ring_init(struct net_device *ndev)
 	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
 					  GFP_KERNEL);
 	if (!mdp->tx_ring)
-		goto desc_ring_free;
+		goto ring_free;
 	return 0;
 
-desc_ring_free:
-	/* free DMA buffer */
-	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
-
-skb_ring_free:
-	/* Free Rx and Tx skb ring buffer */
+ring_free:
+	/* Free Rx and Tx skb ring buffer and DMA buffer */
 	sh_eth_ring_free(ndev);
-	mdp->tx_ring = NULL;
-	mdp->rx_ring = NULL;
 
 	return -ENOMEM;
 }
 
-static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
-{
-	int ringsize;
-
-	if (mdp->rx_ring) {
-		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
-				  mdp->rx_desc_dma);
-		mdp->rx_ring = NULL;
-	}
-
-	if (mdp->tx_ring) {
-		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
-		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
-				  mdp->tx_desc_dma);
-		mdp->tx_ring = NULL;
-	}
-}
-
 static int sh_eth_dev_init(struct net_device *ndev, bool start)
 {
 	int ret = 0;
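
The single ring_free label works for every failure point in sh_eth_ring_init() because sh_eth_ring_free() tolerates a partially initialised state: kfree() is a no-op on a NULL skb array, and the descriptor rings are freed only when their pointers are non-NULL. A minimal sketch of the resulting error path, reconstructed from the hunk above with the allocation steps omitted:

	if (!mdp->tx_ring)
		goto ring_free;
	return 0;

ring_free:
	/* Frees whichever of rx_skbuff, tx_skbuff, rx_ring and tx_ring
	 * were actually allocated; the still-NULL ones are skipped.
	 */
	sh_eth_ring_free(ndev);

	return -ENOMEM;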
@@ -2231,10 +2220,8 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
 		sh_eth_dev_exit(ndev);
 
-		/* Free all the skbuffs in the Rx queue. */
+		/* Free all the skbuffs in the Rx queue and the DMA buffers. */
 		sh_eth_ring_free(ndev);
-
-		/* Free DMA buffer */
-		sh_eth_free_dma_buffer(mdp);
 	}
 
 	/* Set new parameters */
@@ -2479,12 +2466,9 @@ static int sh_eth_close(struct net_device *ndev)
 	free_irq(ndev->irq, ndev);
 
-	/* Free all the skbuffs in the Rx queue. */
+	/* Free all the skbuffs in the Rx queue and the DMA buffer. */
 	sh_eth_ring_free(ndev);
 
-	/* free DMA buffer */
-	sh_eth_free_dma_buffer(mdp);
-
 	pm_runtime_put_sync(&mdp->pdev->dev);
 
 	mdp->is_opened = 0;