Commit 4fcc3d34 authored by Sonic Zhang, committed by David S. Miller

netdev:bfin_mac: reclaim and free tx skb as soon as possible after transfer

SKBs hold onto resources that can't be held indefinitely, such as TCP
socket references and netfilter conntrack state.  So if a packet is left
in TX ring for a long time, there might be a TCP socket that cannot be
closed and freed up.

The current Blackfin EMAC driver only reclaims and frees used TX skbs during a
subsequent transfer. The problem is that the next transfer may not happen for a
long time. This patch starts a timer after a transfer to reclaim and free the
skbs. There is nearly no performance drop with this patch.
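For orientation, the sketch below shows the general shape of the timer-driven
reclaim pattern this patch adds, using the timer API of that kernel generation
(init_timer/mod_timer with an unsigned long cookie). The helpers
reclaim_completed_tx_skbs() and tx_ring_has_outstanding_packets() are
placeholders for illustration only; the actual implementation
(_tx_reclaim_skb, tx_reclaim_skb, tx_reclaim_skb_timeout) is in the diff below.

/* Sketch only: the timer callback frees skbs for descriptors the DMA engine
 * has finished with, then re-arms itself while packets are still pending. */
static void tx_reclaim_timeout(unsigned long data)
{
        struct bfin_mac_local *lp = (struct bfin_mac_local *)data;

        reclaim_completed_tx_skbs(lp);                  /* placeholder helper */
        if (tx_ring_has_outstanding_packets(lp))        /* placeholder helper */
                mod_timer(&lp->tx_reclaim_timer, jiffies + TX_RECLAIM_JIFFIES);
}

/* At probe time: bind the timer to the driver's private data. */
init_timer(&lp->tx_reclaim_timer);
lp->tx_reclaim_timer.data = (unsigned long)lp;
lp->tx_reclaim_timer.function = tx_reclaim_timeout;

/* After queueing a packet in ndo_start_xmit: */
mod_timer(&lp->tx_reclaim_timer, jiffies + TX_RECLAIM_JIFFIES);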

The TX interrupt is not enabled because of a strange behavior of the Blackfin
EMAC: if EMAC TX transfer control is turned on, endless TX interrupts are
triggered whether TX DMA is enabled or not. Since the DMA engine walks down the
descriptor ring automatically, TX transfer control can't be turned off in the
middle of a run. The only option is to disable the TX interrupt completely.
Signed-off-by: Sonic Zhang <sonic.zhang@analog.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent aa1039e7
@@ -922,61 +922,73 @@ static void bfin_mac_hwtstamp_init(struct net_device *netdev)
 # define bfin_tx_hwtstamp(dev, skb)
 #endif
 
-static void adjust_tx_list(void)
-{
-        int timeout_cnt = MAX_TIMEOUT_CNT;
-
-        if (tx_list_head->status.status_word != 0 &&
-            current_tx_ptr != tx_list_head) {
-                goto adjust_head;       /* released something, just return; */
-        }
-
-        /*
-         * if nothing released, check wait condition
-         * current's next can not be the head,
-         * otherwise the dma will not stop as we want
-         */
-        if (current_tx_ptr->next->next == tx_list_head) {
-                while (tx_list_head->status.status_word == 0) {
-                        udelay(10);
-                        if (tx_list_head->status.status_word != 0 ||
-                            !(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)) {
-                                goto adjust_head;
-                        }
-                        if (timeout_cnt-- < 0) {
-                                printk(KERN_ERR DRV_NAME
-                                ": wait for adjust tx list head timeout\n");
-                                break;
-                        }
-                }
-                if (tx_list_head->status.status_word != 0) {
-                        goto adjust_head;
-                }
-        }
-
-        return;
-
-adjust_head:
-        do {
-                tx_list_head->desc_a.config &= ~DMAEN;
-                tx_list_head->status.status_word = 0;
-                if (tx_list_head->skb) {
-                        dev_kfree_skb(tx_list_head->skb);
-                        tx_list_head->skb = NULL;
-                } else {
-                        printk(KERN_ERR DRV_NAME
-                               ": no sk_buff in a transmitted frame!\n");
-                }
-                tx_list_head = tx_list_head->next;
-        } while (tx_list_head->status.status_word != 0 &&
-                current_tx_ptr != tx_list_head);
-        return;
-
-}
+static inline void _tx_reclaim_skb(void)
+{
+        do {
+                tx_list_head->desc_a.config &= ~DMAEN;
+                tx_list_head->status.status_word = 0;
+                if (tx_list_head->skb) {
+                        dev_kfree_skb(tx_list_head->skb);
+                        tx_list_head->skb = NULL;
+                }
+                tx_list_head = tx_list_head->next;
+        } while (tx_list_head->status.status_word != 0);
+}
+
+static void tx_reclaim_skb(struct bfin_mac_local *lp)
+{
+        int timeout_cnt = MAX_TIMEOUT_CNT;
+
+        if (tx_list_head->status.status_word != 0)
+                _tx_reclaim_skb();
+
+        if (current_tx_ptr->next == tx_list_head) {
+                while (tx_list_head->status.status_word == 0) {
+                        /* slow down polling to avoid too many queue stop. */
+                        udelay(10);
+                        /* reclaim skb if DMA is not running. */
+                        if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
+                                break;
+                        if (timeout_cnt-- < 0)
+                                break;
+                }
+
+                if (timeout_cnt >= 0)
+                        _tx_reclaim_skb();
+                else
+                        netif_stop_queue(lp->ndev);
+        }
+
+        if (current_tx_ptr->next != tx_list_head &&
+            netif_queue_stopped(lp->ndev))
+                netif_wake_queue(lp->ndev);
+
+        if (tx_list_head != current_tx_ptr) {
+                /* shorten the timer interval if tx queue is stopped */
+                if (netif_queue_stopped(lp->ndev))
+                        lp->tx_reclaim_timer.expires =
+                                jiffies + (TX_RECLAIM_JIFFIES >> 4);
+                else
+                        lp->tx_reclaim_timer.expires =
+                                jiffies + TX_RECLAIM_JIFFIES;
+
+                mod_timer(&lp->tx_reclaim_timer,
+                        lp->tx_reclaim_timer.expires);
+        }
+
+        return;
+}
+
+static void tx_reclaim_skb_timeout(unsigned long lp)
+{
+        tx_reclaim_skb((struct bfin_mac_local *)lp);
+}
 
 static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
                                 struct net_device *dev)
 {
+        struct bfin_mac_local *lp = netdev_priv(dev);
         u16 *data;
         u32 data_align = (unsigned long)(skb->data) & 0x3;
         union skb_shared_tx *shtx = skb_tx(skb);
@@ -1009,8 +1021,6 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
                                 skb->len);
                 current_tx_ptr->desc_a.start_addr =
                         (u32)current_tx_ptr->packet;
-                if (current_tx_ptr->status.status_word != 0)
-                        current_tx_ptr->status.status_word = 0;
                 blackfin_dcache_flush_range(
                         (u32)current_tx_ptr->packet,
                         (u32)(current_tx_ptr->packet + skb->len + 2));
@@ -1022,6 +1032,9 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
          */
         SSYNC();
 
+        /* always clear status buffer before start tx dma */
+        current_tx_ptr->status.status_word = 0;
+
         /* enable this packet's dma */
         current_tx_ptr->desc_a.config |= DMAEN;
@@ -1037,13 +1050,14 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
         bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
 
 out:
-        adjust_tx_list();
         bfin_tx_hwtstamp(dev, skb);
+
         current_tx_ptr = current_tx_ptr->next;
         dev->stats.tx_packets++;
         dev->stats.tx_bytes += (skb->len);
+
+        tx_reclaim_skb(lp);
+
         return NETDEV_TX_OK;
 }
@@ -1167,8 +1181,11 @@ static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void bfin_mac_poll(struct net_device *dev)
 {
+        struct bfin_mac_local *lp = netdev_priv(dev);
+
         disable_irq(IRQ_MAC_RX);
         bfin_mac_interrupt(IRQ_MAC_RX, dev);
+        tx_reclaim_skb(lp);
         enable_irq(IRQ_MAC_RX);
 }
 #endif                          /* CONFIG_NET_POLL_CONTROLLER */
@@ -1232,12 +1249,27 @@ static int bfin_mac_enable(void)
 /* Our watchdog timed out. Called by the networking layer */
 static void bfin_mac_timeout(struct net_device *dev)
 {
+        struct bfin_mac_local *lp = netdev_priv(dev);
+
         pr_debug("%s: %s\n", dev->name, __func__);
 
         bfin_mac_disable();
 
-        /* reset tx queue */
-        tx_list_tail = tx_list_head->next;
+        del_timer(&lp->tx_reclaim_timer);
+
+        /* reset tx queue and free skb */
+        while (tx_list_head != current_tx_ptr) {
+                tx_list_head->desc_a.config &= ~DMAEN;
+                tx_list_head->status.status_word = 0;
+                if (tx_list_head->skb) {
+                        dev_kfree_skb(tx_list_head->skb);
+                        tx_list_head->skb = NULL;
+                }
+                tx_list_head = tx_list_head->next;
+        }
+
+        if (netif_queue_stopped(lp->ndev))
+                netif_wake_queue(lp->ndev);
 
         bfin_mac_enable();
@@ -1430,6 +1462,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
         SET_NETDEV_DEV(ndev, &pdev->dev);
         platform_set_drvdata(pdev, ndev);
         lp = netdev_priv(ndev);
+        lp->ndev = ndev;
 
         /* Grab the MAC address in the MAC */
         *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
@@ -1485,6 +1518,10 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
         ndev->netdev_ops = &bfin_mac_netdev_ops;
         ndev->ethtool_ops = &bfin_mac_ethtool_ops;
 
+        init_timer(&lp->tx_reclaim_timer);
+        lp->tx_reclaim_timer.data = (unsigned long)lp;
+        lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;
+
         spin_lock_init(&lp->lock);
 
         /* now, enable interrupts */
@@ -13,9 +13,12 @@
 #include <linux/net_tstamp.h>
 #include <linux/clocksource.h>
 #include <linux/timecompare.h>
+#include <linux/timer.h>
 
 #define BFIN_MAC_CSUM_OFFLOAD
 
+#define TX_RECLAIM_JIFFIES      (HZ / 5)
+
 struct dma_descriptor {
         struct dma_descriptor *next_dma_desc;
         unsigned long start_addr;
@@ -68,6 +71,8 @@ struct bfin_mac_local {
         int wol;                /* Wake On Lan */
         int irq_wake_requested;
+        struct timer_list tx_reclaim_timer;
+        struct net_device *ndev;
 
         /* MII and PHY stuffs */
         int old_link;           /* used by bf537_adjust_link */