Commit 1488fc20 authored by Aleksander Jan Bajkowski, committed by Jakub Kicinski

net: lantiq_xrx200: increase buffer reservation

If the user sets a lower MTU on the CPU port than on the switch,
then the DMA writes a few more bytes into the buffer than expected.
In the worst case, it may exceed the size of the buffer. Experiments
showed that the buffer size should be a multiple of the burst length
value. This patch rounds the length of the RX buffer up accordingly,
which fixes the bug. The reservation of FCS space in the buffer has
been removed, as the PMAC strips the FCS.
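
For illustration (not part of the patch): a stand-alone sketch of the
new sizing helpers, assuming XRX200_DMA_BURST_LEN == 8 (the value this
driver defines; treat it as an assumption here), VLAN_ETH_HLEN == 18,
and a kernel-style round_up() for power-of-two alignment:

/*
 * Stand-alone sketch of the new RX buffer sizing; values are
 * illustrative, XRX200_DMA_BURST_LEN == 8 is an assumed driver value.
 */
#include <stdio.h>

#define VLAN_ETH_HLEN		18	/* Ethernet header + one VLAN tag */
#define XRX200_DMA_BURST_LEN	8	/* assumed driver value */

/* kernel-style round_up() for power-of-two alignment */
#define round_up(x, y)		((((x) - 1) | ((y) - 1)) + 1)

static int xrx200_max_frame_len(int mtu)
{
	return VLAN_ETH_HLEN + mtu;
}

static int xrx200_buffer_size(int mtu)
{
	return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
}

int main(void)
{
	/* default MTU: 1500 + 18 = 1518, rounded up to 1536 (48 * 32) */
	printf("mtu 1500 -> buffer %d\n", xrx200_buffer_size(1500));
	/* lowered MTU: 1400 + 18 = 1418, rounded up to 1440 (45 * 32) */
	printf("mtu 1400 -> buffer %d\n", xrx200_buffer_size(1400));
	return 0;
}

With the default 1500-byte MTU this reserves 1536 bytes instead of the
previous 1522 (mtu + VLAN_ETH_HLEN + ETH_FCS_LEN), so every RX buffer
is a whole number of 32-byte bursts.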

Fixes: 998ac358 ("net: lantiq: add support for jumbo frames")
Reported-by: Thomas Nixon <tom@tomn.co.uk>
Signed-off-by: Aleksander Jan Bajkowski <olek2@wp.pl>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 14193d57
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -71,6 +71,8 @@ struct xrx200_priv {
 	struct xrx200_chan chan_tx;
 	struct xrx200_chan chan_rx;
 
+	u16 rx_buf_size;
+
 	struct net_device *net_dev;
 	struct device *dev;
 
@@ -97,6 +99,16 @@ static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
 	xrx200_pmac_w32(priv, val, offset);
 }
 
+static int xrx200_max_frame_len(int mtu)
+{
+	return VLAN_ETH_HLEN + mtu;
+}
+
+static int xrx200_buffer_size(int mtu)
+{
+	return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
+}
+
 /* drop all the packets from the DMA ring */
 static void xrx200_flush_dma(struct xrx200_chan *ch)
 {
@@ -109,8 +121,7 @@ static void xrx200_flush_dma(struct xrx200_chan *ch)
 			break;
 
 		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
-			    (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
-			     ETH_FCS_LEN);
+			    ch->priv->rx_buf_size;
 		ch->dma.desc++;
 		ch->dma.desc %= LTQ_DESC_NUM;
 	}
@@ -158,21 +169,21 @@ static int xrx200_close(struct net_device *net_dev)
 
 static int xrx200_alloc_skb(struct xrx200_chan *ch)
 {
-	int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
 	struct sk_buff *skb = ch->skb[ch->dma.desc];
+	struct xrx200_priv *priv = ch->priv;
 	dma_addr_t mapping;
 	int ret = 0;
 
-	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
-							  len);
+	ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(priv->net_dev,
+							  priv->rx_buf_size);
 	if (!ch->skb[ch->dma.desc]) {
 		ret = -ENOMEM;
 		goto skip;
 	}
 
-	mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
-				 len, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
+	mapping = dma_map_single(priv->dev, ch->skb[ch->dma.desc]->data,
+				 priv->rx_buf_size, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(priv->dev, mapping))) {
 		dev_kfree_skb_any(ch->skb[ch->dma.desc]);
 		ch->skb[ch->dma.desc] = skb;
 		ret = -ENOMEM;
@@ -184,7 +195,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
 	wmb();
 skip:
 	ch->dma.desc_base[ch->dma.desc].ctl =
-		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
+		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;
 
 	return ret;
 }
@@ -356,6 +367,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
 	int ret = 0;
 
 	net_dev->mtu = new_mtu;
+	priv->rx_buf_size = xrx200_buffer_size(new_mtu);
 
 	if (new_mtu <= old_mtu)
 		return ret;
@@ -375,6 +387,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
 		ret = xrx200_alloc_skb(ch_rx);
 		if (ret) {
 			net_dev->mtu = old_mtu;
+			priv->rx_buf_size = xrx200_buffer_size(old_mtu);
 			break;
 		}
 		dev_kfree_skb_any(skb);
@@ -505,7 +518,8 @@ static int xrx200_probe(struct platform_device *pdev)
 	net_dev->netdev_ops = &xrx200_netdev_ops;
 	SET_NETDEV_DEV(net_dev, dev);
 	net_dev->min_mtu = ETH_ZLEN;
-	net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
+	net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
+	priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
 
 	/* load the memory ranges */
 	priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);