Commit 22087d65 authored by Christian Lamparter, committed by David S. Miller

net: emac: remove IBM_EMAC_RX_SKB_HEADROOM

The EMAC driver had a custom IBM_EMAC_RX_SKB_HEADROOM
Kconfig option that reserved additional skb headroom for RX.
This patch removes the option and migrates the code
to use napi_alloc_skb() and netdev_alloc_skb_ip_align()
in its place.
Signed-off-by: Christian Lamparter <chunkeey@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a6e11f6b
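Editor's note on the helpers involved: the old code allocated a raw skb and reserved the alignment headroom by hand, whereas netdev_alloc_skb_ip_align() reserves NET_IP_ALIGN bytes itself so the IP header lands on an aligned boundary. A minimal, hypothetical sketch of the two styles follows (not part of the patch; the example_* function names are illustrative only):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Old style: raw allocation plus manual headroom, as removed by this patch. */
	static struct sk_buff *example_rx_alloc_old(unsigned int size)
	{
		struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);

		if (skb)
			skb_reserve(skb, 2);	/* hand-rolled IP header alignment */
		return skb;
	}

	/* New style: the core helper reserves NET_IP_ALIGN bytes of headroom itself. */
	static struct sk_buff *example_rx_alloc_new(struct net_device *ndev,
						    unsigned int size)
	{
		return netdev_alloc_skb_ip_align(ndev, size);
	}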
drivers/net/ethernet/ibm/emac/Kconfig

@@ -28,18 +28,6 @@ config IBM_EMAC_RX_COPY_THRESHOLD
 	depends on IBM_EMAC
 	default "256"
 
-config IBM_EMAC_RX_SKB_HEADROOM
-	int "Additional RX skb headroom (bytes)"
-	depends on IBM_EMAC
-	default "0"
-	help
-	  Additional receive skb headroom. Note, that driver
-	  will always reserve at least 2 bytes to make IP header
-	  aligned, so usually there is no need to add any additional
-	  headroom.
-
-	  If unsure, set to 0.
-
 config IBM_EMAC_DEBUG
 	bool "Debugging"
 	depends on IBM_EMAC
drivers/net/ethernet/ibm/emac/core.c

@@ -1071,7 +1071,9 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
 
 	/* Second pass, allocate new skbs */
 	for (i = 0; i < NUM_RX_BUFF; ++i) {
-		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
+		struct sk_buff *skb;
+
+		skb = netdev_alloc_skb_ip_align(dev->ndev, rx_skb_size);
 		if (!skb) {
 			ret = -ENOMEM;
 			goto oom;
@@ -1080,10 +1082,10 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
 		BUG_ON(!dev->rx_skb[i]);
 		dev_kfree_skb(dev->rx_skb[i]);
 
-		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
 		dev->rx_desc[i].data_ptr =
-		    dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
-				   DMA_FROM_DEVICE) + 2;
+		    dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
+				   rx_sync_size, DMA_FROM_DEVICE)
+				   + NET_IP_ALIGN;
 		dev->rx_skb[i] = skb;
 	}
  skip:
@@ -1174,20 +1176,18 @@ static void emac_clean_rx_ring(struct emac_instance *dev)
 	}
 }
 
-static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
-				    gfp_t flags)
+static int
+__emac_prepare_rx_skb(struct sk_buff *skb, struct emac_instance *dev, int slot)
 {
-	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
 	if (unlikely(!skb))
 		return -ENOMEM;
 
 	dev->rx_skb[slot] = skb;
 	dev->rx_desc[slot].data_len = 0;
 
-	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
 	dev->rx_desc[slot].data_ptr =
-	    dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
-			   DMA_FROM_DEVICE) + 2;
+	    dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
+			   dev->rx_sync_size, DMA_FROM_DEVICE) + NET_IP_ALIGN;
 	wmb();
 	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
 	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
@@ -1195,6 +1195,27 @@ static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
 	return 0;
 }
 
+static int
+emac_alloc_rx_skb(struct emac_instance *dev, int slot)
+{
+	struct sk_buff *skb;
+
+	skb = __netdev_alloc_skb_ip_align(dev->ndev, dev->rx_skb_size,
+					  GFP_KERNEL);
+
+	return __emac_prepare_rx_skb(skb, dev, slot);
+}
+
+static int
+emac_alloc_rx_skb_napi(struct emac_instance *dev, int slot)
+{
+	struct sk_buff *skb;
+
+	skb = napi_alloc_skb(&dev->mal->napi, dev->rx_skb_size);
+
+	return __emac_prepare_rx_skb(skb, dev, slot);
+}
+
 static void emac_print_link_status(struct emac_instance *dev)
 {
 	if (netif_carrier_ok(dev->ndev))
@@ -1225,7 +1246,7 @@ static int emac_open(struct net_device *ndev)
 
 	/* Allocate RX ring */
 	for (i = 0; i < NUM_RX_BUFF; ++i)
-		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
+		if (emac_alloc_rx_skb(dev, i)) {
 			printk(KERN_ERR "%s: failed to allocate RX ring\n",
 			       ndev->name);
 			goto oom;
@@ -1660,8 +1681,9 @@ static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
 	DBG2(dev, "recycle %d %d" NL, slot, len);
 
 	if (len)
-		dma_map_single(&dev->ofdev->dev, skb->data - 2,
-			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
+		dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN,
+			       SKB_DATA_ALIGN(len + NET_IP_ALIGN),
+			       DMA_FROM_DEVICE);
 
 	dev->rx_desc[slot].data_len = 0;
 	wmb();
@@ -1713,7 +1735,7 @@ static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
 		int len = dev->rx_desc[slot].data_len;
 		int tot_len = dev->rx_sg_skb->len + len;
 
-		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
+		if (unlikely(tot_len + NET_IP_ALIGN > dev->rx_skb_size)) {
 			++dev->estats.rx_dropped_mtu;
 			dev_kfree_skb(dev->rx_sg_skb);
 			dev->rx_sg_skb = NULL;
@@ -1769,16 +1791,18 @@ static int emac_poll_rx(void *param, int budget)
 		}
 
 		if (len && len < EMAC_RX_COPY_THRESH) {
-			struct sk_buff *copy_skb =
-			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
+			struct sk_buff *copy_skb;
+
+			copy_skb = napi_alloc_skb(&dev->mal->napi, len);
 			if (unlikely(!copy_skb))
 				goto oom;
 
-			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
-			memcpy(copy_skb->data - 2, skb->data - 2, len + 2);
+			memcpy(copy_skb->data - NET_IP_ALIGN,
+			       skb->data - NET_IP_ALIGN,
+			       len + NET_IP_ALIGN);
 			emac_recycle_rx_skb(dev, slot, len);
 			skb = copy_skb;
-		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
+		} else if (unlikely(emac_alloc_rx_skb_napi(dev, slot)))
 			goto oom;
 
 		skb_put(skb, len);
@@ -1799,7 +1823,7 @@ static int emac_poll_rx(void *param, int budget)
 	sg:
 		if (ctrl & MAL_RX_CTRL_FIRST) {
 			BUG_ON(dev->rx_sg_skb);
-			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
+			if (unlikely(emac_alloc_rx_skb_napi(dev, slot))) {
 				DBG(dev, "rx OOM %d" NL, slot);
 				++dev->estats.rx_dropped_oom;
 				emac_recycle_rx_skb(dev, slot, 0);
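Editor's note: the RX poll path above now refills ring slots with napi_alloc_skb(), which may only be called from NAPI (softirq) context and already reserves NET_SKB_PAD + NET_IP_ALIGN bytes of headroom. A generic sketch of that refill pattern, with a hypothetical private structure (my_priv and my_refill_rx_slot are made up; only napi_alloc_skb() itself is a real kernel API):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical private data; stands in for emac_instance in this sketch. */
	struct my_priv {
		struct napi_struct napi;
		struct sk_buff *rx_skb[16];
	};

	/* Refill one RX slot from NAPI context, mirroring emac_alloc_rx_skb_napi(). */
	static int my_refill_rx_slot(struct my_priv *priv, int slot, unsigned int size)
	{
		/* napi_alloc_skb() draws from a per-CPU page-fragment cache and
		 * reserves NET_SKB_PAD + NET_IP_ALIGN bytes of headroom for us.
		 */
		struct sk_buff *skb = napi_alloc_skb(&priv->napi, size);

		if (unlikely(!skb))
			return -ENOMEM;

		priv->rx_skb[slot] = skb;
		return 0;
	}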
drivers/net/ethernet/ibm/emac/core.h

@@ -68,22 +68,18 @@ static inline int emac_rx_size(int mtu)
 	return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD);
 }
 
-#define EMAC_DMA_ALIGN(x)	ALIGN((x), dma_get_cache_alignment())
-
-#define EMAC_RX_SKB_HEADROOM		\
-	EMAC_DMA_ALIGN(CONFIG_IBM_EMAC_RX_SKB_HEADROOM)
-
 /* Size of RX skb for the given MTU */
 static inline int emac_rx_skb_size(int mtu)
 {
 	int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu));
-	return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM;
+
+	return SKB_DATA_ALIGN(size + NET_IP_ALIGN) + NET_SKB_PAD;
 }
 
 /* RX DMA sync size */
static inline int emac_rx_sync_size(int mtu)
 {
-	return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2);
+	return SKB_DATA_ALIGN(emac_rx_size(mtu) + NET_IP_ALIGN);
 }
 
 /* Driver statistcs is split into two parts to make it more cache friendly:
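Editor's note on the new sizing arithmetic: SKB_DATA_ALIGN() rounds up to SMP_CACHE_BYTES, NET_IP_ALIGN is typically 2 and NET_SKB_PAD typically 32 (both architecture dependent). With those typical values and a 64-byte cache line, a 1500-byte size works out to SKB_DATA_ALIGN(1502) + 32 = 1536 + 32 = 1568 bytes. The helper below is a hypothetical restatement for illustration, not driver code:

	#include <linux/skbuff.h>

	/* Hypothetical restatement of the new emac_rx_skb_size() arithmetic:
	 * room for the payload plus the implicit NET_IP_ALIGN offset, rounded
	 * up by SKB_DATA_ALIGN(), plus the NET_SKB_PAD headroom reserved by
	 * netdev_alloc_skb_ip_align()/napi_alloc_skb().
	 */
	static inline int example_rx_skb_size(int payload)
	{
		return SKB_DATA_ALIGN(payload + NET_IP_ALIGN) + NET_SKB_PAD;
	}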