Commit 8c7396ae authored by Dai Haruki, committed by David S. Miller

gianfar: Merge Tx and Rx interrupt for scheduling clean up ring

With this patch, no clean up function runs in interrupt context any more: both the Tx and Rx interrupt handlers only disable further Rx/Tx interrupts and schedule the NAPI poll, which clears IEVENT, cleans both rings, and re-arms the coalescing timers.
Signed-off-by: Dai Haruki <dai.haruki@freescale.com>
Acked-by: Andy Fleming <afleming@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c50a5d9a
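
For readability, this is the shape of the new scheduling path that the gianfar.c hunks below introduce, assembled from the added lines of the diff (surrounding driver context is elided; the hunks themselves are the authoritative change):

	/* Both hard-IRQ handlers stop touching the rings; they only mask
	 * further Rx/Tx interrupts and defer the work to the NAPI poll. */
	static void gfar_schedule_cleanup(struct net_device *dev)
	{
		struct gfar_private *priv = netdev_priv(dev);
		if (netif_rx_schedule_prep(dev, &priv->napi)) {
			/* Keep Rx/Tx interrupts masked until gfar_poll() runs */
			gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
			__netif_rx_schedule(dev, &priv->napi);
		}
	}

	/* Interrupt Handler for Transmit complete */
	static irqreturn_t gfar_transmit(int irq, void *dev_id)
	{
		gfar_schedule_cleanup((struct net_device *)dev_id);
		return IRQ_HANDLED;
	}

	irqreturn_t gfar_receive(int irq, void *dev_id)
	{
		gfar_schedule_cleanup((struct net_device *)dev_id);
		return IRQ_HANDLED;
	}

Ring clean up itself (gfar_clean_tx_ring()/gfar_clean_rx_ring()) and the IEVENT acknowledgement now happen only in gfar_poll(), in softirq context.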
@@ -1604,29 +1604,19 @@ static int gfar_clean_tx_ring(struct net_device *dev)
 	return howmany;
 }
 
-/* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *dev_id)
+static void gfar_schedule_cleanup(struct net_device *dev)
 {
-	struct net_device *dev = (struct net_device *) dev_id;
 	struct gfar_private *priv = netdev_priv(dev);
+	if (netif_rx_schedule_prep(dev, &priv->napi)) {
+		gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
+		__netif_rx_schedule(dev, &priv->napi);
+	}
+}
 
-	/* Clear IEVENT */
-	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
-
-	/* Lock priv */
-	spin_lock(&priv->txlock);
-
-	gfar_clean_tx_ring(dev);
-
-	/* If we are coalescing the interrupts, reset the timer */
-	/* Otherwise, clear it */
-	if (likely(priv->txcoalescing)) {
-		gfar_write(&priv->regs->txic, 0);
-		gfar_write(&priv->regs->txic, priv->txic);
-	}
-
-	spin_unlock(&priv->txlock);
-
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *dev_id)
+{
+	gfar_schedule_cleanup((struct net_device *)dev_id);
 	return IRQ_HANDLED;
 }
 
@@ -1713,28 +1703,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
 
 irqreturn_t gfar_receive(int irq, void *dev_id)
 {
-	struct net_device *dev = (struct net_device *) dev_id;
-	struct gfar_private *priv = netdev_priv(dev);
-	u32 tempval;
-
-	/* support NAPI */
-	/* Clear IEVENT, so interrupts aren't called again
-	 * because of the packets that have already arrived */
-	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
-
-	if (netif_rx_schedule_prep(dev, &priv->napi)) {
-		tempval = gfar_read(&priv->regs->imask);
-		tempval &= IMASK_RTX_DISABLED;
-		gfar_write(&priv->regs->imask, tempval);
-
-		__netif_rx_schedule(dev, &priv->napi);
-	} else {
-		if (netif_msg_rx_err(priv))
-			printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
-				dev->name, gfar_read(&priv->regs->ievent),
-				gfar_read(&priv->regs->imask));
-	}
-
+	gfar_schedule_cleanup((struct net_device *)dev_id);
 	return IRQ_HANDLED;
 }
 
@@ -1877,6 +1846,10 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 	int howmany;
 	unsigned long flags;
 
+	/* Clear IEVENT, so interrupts aren't called again
+	 * because of the packets that have already arrived */
+	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
+
 	/* If we fail to get the lock, don't bother with the TX BDs */
 	if (spin_trylock_irqsave(&priv->txlock, flags)) {
 		gfar_clean_tx_ring(dev);
@@ -1899,6 +1872,10 @@ static int gfar_poll(struct napi_struct *napi, int budget)
 			gfar_write(&priv->regs->rxic, 0);
 			gfar_write(&priv->regs->rxic, priv->rxic);
 		}
+		if (likely(priv->txcoalescing)) {
+			gfar_write(&priv->regs->txic, 0);
+			gfar_write(&priv->regs->txic, priv->txic);
+		}
 	}
 
 	return howmany;
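
Taken together, the two gfar_poll() hunks above move both the IEVENT acknowledgement and the coalescing-timer reset out of hard-IRQ context and into the poll routine. A rough sketch of the resulting flow inside gfar_poll() follows; helper names outside the hunks, such as gfar_clean_rx_ring() and the elided netif_rx_complete()/IMASK restore, come from the surrounding driver and are not part of this patch:

	/* Sketch of gfar_poll() after this patch (softirq context): acknowledge
	 * the merged Rx/Tx events, clean both rings, then re-arm coalescing. */
	gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);

	/* Tx clean up only if the lock is uncontended */
	if (spin_trylock_irqsave(&priv->txlock, flags)) {
		gfar_clean_tx_ring(dev);
		spin_unlock_irqrestore(&priv->txlock, flags);
	}

	howmany = gfar_clean_rx_ring(dev, budget);

	if (howmany < budget) {
		/* ... netif_rx_complete() and IMASK restore elided ... */
		if (likely(priv->rxcoalescing)) {	/* reset or clear the Rx timer */
			gfar_write(&priv->regs->rxic, 0);
			gfar_write(&priv->regs->rxic, priv->rxic);
		}
		if (likely(priv->txcoalescing)) {	/* reset or clear the Tx timer */
			gfar_write(&priv->regs->txic, 0);
			gfar_write(&priv->regs->txic, priv->txic);
		}
	}

	return howmany;
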
@@ -251,7 +251,7 @@ extern const char gfar_driver_version[];
 #define IEVENT_FIQ		0x00000004
 #define IEVENT_DPE		0x00000002
 #define IEVENT_PERR		0x00000001
-#define IEVENT_RX_MASK		(IEVENT_RXB0 | IEVENT_RXF0)
+#define IEVENT_RX_MASK		(IEVENT_RXB0 | IEVENT_RXF0 | IEVENT_BSY)
 #define IEVENT_TX_MASK		(IEVENT_TXB | IEVENT_TXF)
 #define IEVENT_RTX_MASK		(IEVENT_RX_MASK | IEVENT_TX_MASK)
 #define IEVENT_ERR_MASK		\