Commit ef468d23 authored by Rob Herring's avatar Rob Herring Committed by David S. Miller

net: calxedaxgmac: ip align receive buffers

On gcc 4.7, we will get alignment traps in the ip stack if we don't align
the ip headers on receive. The h/w can support this, so use ip aligned
allocations.

Cut down the unnecessary padding on the allocation. The buffer can start on
any byte alignment, but the size including the beginning offset must be 8
byte aligned. So the h/w buffer size must include the NET_IP_ALIGN offset.

Thanks to Eric Dumazet for the initial patch highlighting the padding issues.
Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 97a3a9a6
...@@ -665,6 +665,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv) ...@@ -665,6 +665,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
{ {
struct xgmac_dma_desc *p; struct xgmac_dma_desc *p;
dma_addr_t paddr; dma_addr_t paddr;
int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;
while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) { while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
int entry = priv->rx_head; int entry = priv->rx_head;
...@@ -673,13 +674,13 @@ static void xgmac_rx_refill(struct xgmac_priv *priv) ...@@ -673,13 +674,13 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
p = priv->dma_rx + entry; p = priv->dma_rx + entry;
if (priv->rx_skbuff[entry] == NULL) { if (priv->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
if (unlikely(skb == NULL)) if (unlikely(skb == NULL))
break; break;
priv->rx_skbuff[entry] = skb; priv->rx_skbuff[entry] = skb;
paddr = dma_map_single(priv->device, skb->data, paddr = dma_map_single(priv->device, skb->data,
priv->dma_buf_sz, DMA_FROM_DEVICE); bufsz, DMA_FROM_DEVICE);
desc_set_buf_addr(p, paddr, priv->dma_buf_sz); desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
} }
...@@ -703,10 +704,10 @@ static int xgmac_dma_desc_rings_init(struct net_device *dev) ...@@ -703,10 +704,10 @@ static int xgmac_dma_desc_rings_init(struct net_device *dev)
unsigned int bfsize; unsigned int bfsize;
/* Set the Buffer size according to the MTU; /* Set the Buffer size according to the MTU;
* indeed, in case of jumbo we need to bump-up the buffer sizes. * The total buffer size including any IP offset must be a multiple
* of 8 bytes.
*/ */
bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64, bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
64);
netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize); netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment