Commit 56329137 authored by Bartlomiej Zolnierkiewicz, committed by David S. Miller

stmmac: fix init_dma_desc_rings() to handle errors

In stmmac_init_rx_buffers():
* add missing handling of dma_map_single() error
* remove superfluous unlikely() optimization while at it

Add stmmac_free_rx_buffers() helper and use it in dma_free_rx_skbufs().

In init_dma_desc_rings():
* add missing handling of kmalloc_array() errors
* fix handling of dma_alloc_coherent() and stmmac_init_rx_buffers() errors
* make the function return an error value on failure and 0 on success

In stmmac_open():
* add handling of init_dma_desc_rings() return value
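
For reference, both functions now follow the standard kernel goto-unwind idiom: every allocation is checked individually, and a failure jumps into a ladder of labels that releases everything acquired so far, in reverse order. Below is a minimal standalone sketch of that idiom (the struct foo and setup() names are hypothetical, and plain malloc()/free() stand in for the kernel allocators; this is not code from the patch):

    #include <stdlib.h>

    struct foo {
            void *a;
            void *b;
    };

    /* Each label is named after the step that failed; the code below
     * a label undoes only the steps that had already succeeded. */
    static int setup(struct foo *f)
    {
            int ret = -1;                   /* stands in for -ENOMEM */

            f->a = malloc(64);
            if (!f->a)
                    goto err_a;             /* nothing to undo yet */

            f->b = malloc(64);
            if (!f->b)
                    goto err_b;             /* must release f->a */

            return 0;

    err_b:
            free(f->a);
    err_a:
            return ret;
    }

    int main(void)
    {
            struct foo f = { NULL, NULL };

            if (setup(&f))
                    return 1;
            free(f.b);
            free(f.a);
            return 0;
    }

init_dma_desc_rings() in the patch below applies exactly this shape, with its ladder of labels running from err_init_rx_buffers down to err_dma.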
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d4cca39d
@@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 	skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
 				 GFP_KERNEL);
-	if (unlikely(skb == NULL)) {
+	if (!skb) {
 		pr_err("%s: Rx init fails; skb is NULL\n", __func__);
-		return 1;
+		return -ENOMEM;
 	}
 	skb_reserve(skb, NET_IP_ALIGN);
 	priv->rx_skbuff[i] = skb;
 	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
 						priv->dma_buf_sz,
 						DMA_FROM_DEVICE);
+	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
+		pr_err("%s: DMA mapping error\n", __func__);
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
 
 	p->des2 = priv->rx_skbuff_dma[i];
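
The new dma_mapping_error() check closes a real gap: dma_map_single() can fail (for example under an IOMMU, or when bounce buffers are exhausted), and on failure the returned handle is poisoned and must be neither used nor unmapped. The canonical in-kernel pattern, sketched as a fragment with generic dev/buf/len names (assumptions for illustration, not the driver's identifiers):

    dma_addr_t handle;

    handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, handle)) {
            /* handle is invalid; never pass it to dma_unmap_single() */
            return -EINVAL;         /* caller frees buf */
    }

The hunk above applies this pattern to the just-allocated skb, freeing it before returning.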
@@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 	return 0;
 }
 
+static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
+{
+	if (priv->rx_skbuff[i]) {
+		dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+				 priv->dma_buf_sz, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(priv->rx_skbuff[i]);
+	}
+	priv->rx_skbuff[i] = NULL;
+}
+
 /**
  * init_dma_desc_rings - init the RX/TX descriptor rings
  * @dev: net device structure
@@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
  * and allocates the socket buffers. It suppors the chained and ring
  * modes.
  */
-static void init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev)
 {
 	int i;
 	struct stmmac_priv *priv = netdev_priv(dev);
 	unsigned int txsize = priv->dma_tx_size;
 	unsigned int rxsize = priv->dma_rx_size;
 	unsigned int bfsize = 0;
+	int ret = -ENOMEM;
 
 	/* Set the max buffer size according to the DESC mode
 	 * and the MTU. Note that RING mode allows 16KiB bsize.
@@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev)
 							  dma_extended_desc),
 						   &priv->dma_rx_phy,
 						   GFP_KERNEL);
+		if (!priv->dma_erx)
+			goto err_dma;
+
 		priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
 						   sizeof(struct
 							  dma_extended_desc),
 						   &priv->dma_tx_phy,
 						   GFP_KERNEL);
-		if ((!priv->dma_erx) || (!priv->dma_etx))
-			return;
+		if (!priv->dma_etx) {
+			dma_free_coherent(priv->device, priv->dma_rx_size *
+					  sizeof(struct dma_extended_desc),
+					  priv->dma_erx, priv->dma_rx_phy);
+			goto err_dma;
+		}
 	} else {
 		priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
 						  sizeof(struct dma_desc),
 						  &priv->dma_rx_phy,
 						  GFP_KERNEL);
+		if (!priv->dma_rx)
+			goto err_dma;
+
 		priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
 						  sizeof(struct dma_desc),
 						  &priv->dma_tx_phy,
 						  GFP_KERNEL);
-		if ((!priv->dma_rx) || (!priv->dma_tx))
-			return;
+		if (!priv->dma_tx) {
+			dma_free_coherent(priv->device, priv->dma_rx_size *
+					  sizeof(struct dma_desc),
+					  priv->dma_rx, priv->dma_rx_phy);
+			goto err_dma;
+		}
 	}
 
 	priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
 					    GFP_KERNEL);
+	if (!priv->rx_skbuff_dma)
+		goto err_rx_skbuff_dma;
+
 	priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
 					GFP_KERNEL);
+	if (!priv->rx_skbuff)
+		goto err_rx_skbuff;
+
 	priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
 					    GFP_KERNEL);
+	if (!priv->tx_skbuff_dma)
+		goto err_tx_skbuff_dma;
+
 	priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
 					GFP_KERNEL);
+	if (!priv->tx_skbuff)
+		goto err_tx_skbuff;
+
 	if (netif_msg_probe(priv)) {
 		pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
 			 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev)
 		else
 			p = priv->dma_rx + i;
 
-		if (stmmac_init_rx_buffers(priv, p, i))
-			break;
+		ret = stmmac_init_rx_buffers(priv, p, i);
+		if (ret)
+			goto err_init_rx_buffers;
 
 		if (netif_msg_probe(priv))
 			pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
@@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev)
 	if (netif_msg_hw(priv))
 		stmmac_display_rings(priv);
+
+	return 0;
+err_init_rx_buffers:
+	while (--i >= 0)
+		stmmac_free_rx_buffers(priv, i);
+	kfree(priv->tx_skbuff);
+err_tx_skbuff:
+	kfree(priv->tx_skbuff_dma);
+err_tx_skbuff_dma:
+	kfree(priv->rx_skbuff);
+err_rx_skbuff:
+	kfree(priv->rx_skbuff_dma);
+err_rx_skbuff_dma:
+	if (priv->extend_desc) {
+		dma_free_coherent(priv->device, priv->dma_tx_size *
+				  sizeof(struct dma_extended_desc),
+				  priv->dma_etx, priv->dma_tx_phy);
+		dma_free_coherent(priv->device, priv->dma_rx_size *
+				  sizeof(struct dma_extended_desc),
+				  priv->dma_erx, priv->dma_rx_phy);
+	} else {
+		dma_free_coherent(priv->device,
+				  priv->dma_tx_size * sizeof(struct dma_desc),
+				  priv->dma_tx, priv->dma_tx_phy);
+		dma_free_coherent(priv->device,
+				  priv->dma_rx_size * sizeof(struct dma_desc),
+				  priv->dma_rx, priv->dma_rx_phy);
+	}
+err_dma:
+	return ret;
 }
 
 static void dma_free_rx_skbufs(struct stmmac_priv *priv)
 {
 	int i;
 
-	for (i = 0; i < priv->dma_rx_size; i++) {
-		if (priv->rx_skbuff[i]) {
-			dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
-					 priv->dma_buf_sz, DMA_FROM_DEVICE);
-			dev_kfree_skb_any(priv->rx_skbuff[i]);
-		}
-		priv->rx_skbuff[i] = NULL;
-	}
+	for (i = 0; i < priv->dma_rx_size; i++)
+		stmmac_free_rx_buffers(priv, i);
 }
 
 static void dma_free_tx_skbufs(struct stmmac_priv *priv)
@@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev)
 	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
 	priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
-	init_dma_desc_rings(dev);
+
+	ret = init_dma_desc_rings(dev);
+	if (ret < 0) {
+		pr_err("%s: DMA descriptors initialization failed\n", __func__);
+		goto dma_desc_error;
+	}
 
 	/* DMA initialization and SW reset */
 	ret = stmmac_init_dma_engine(priv);
 	if (ret < 0) {
-		pr_err("%s: DMA initialization failed\n", __func__);
+		pr_err("%s: DMA engine initialization failed\n", __func__);
 		goto init_error;
 	}
@@ -1672,6 +1744,7 @@ static int stmmac_open(struct net_device *dev)
 init_error:
 	free_dma_desc_resources(priv);
+dma_desc_error:
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
 phy_error:
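
Note the placement of the new dma_desc_error label: it sits below free_dma_desc_resources(priv), so a failure in init_dma_desc_rings() skips that call (the function already unwound its own partial allocations) but still falls through to disconnect the PHY, preserving the reverse-of-initialization cleanup order in stmmac_open().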