Commit bd092ad1 authored by David S. Miller

Merge branch 'remove-__napi_complete_done'

Eric Dumazet says:

====================
net: get rid of __napi_complete()

This patch series removes __napi_complete() calls, in an effort
to make the NAPI API simpler and to generalize use of GRO and napi_complete_done().
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3976001c 02c1602e
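
Every driver conversion in this series follows the same shape, so it helps to see it once in isolation. The sketch below is illustrative only: the foo_* names, lock, and interrupt helper are hypothetical stand-ins, not code from any driver touched here. The old style completed NAPI with __napi_complete() under a driver lock and told the core nothing about how much work was done; the new style hands work_done to napi_complete_done(), which can drive GRO flushing and busy polling, and the driver re-enables its interrupt only if the core actually accepted the completion.

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_priv {                               /* hypothetical driver state */
    struct napi_struct napi;
    spinlock_t lock;
};

static int foo_clean_rx(struct foo_priv *fp, int budget);   /* hypothetical */
static void foo_enable_rx_irq(struct foo_priv *fp);         /* hypothetical */

/* Before the series: no packet count ever reaches the core. */
static int foo_poll_old(struct napi_struct *napi, int budget)
{
    struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
    int work_done = foo_clean_rx(fp, budget);

    if (work_done < budget) {
        unsigned long flags;

        spin_lock_irqsave(&fp->lock, flags);
        __napi_complete(napi);
        foo_enable_rx_irq(fp);
        spin_unlock_irqrestore(&fp->lock, flags);
    }
    return work_done;
}

/* After: napi_complete_done() receives the count and may refuse the
 * completion (returning false, for instance while busy polling owns
 * the instance), in which case the interrupt must stay masked.
 */
static int foo_poll_new(struct napi_struct *napi, int budget)
{
    struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
    int work_done = foo_clean_rx(fp, budget);

    if (work_done < budget && napi_complete_done(napi, work_done)) {
        unsigned long flags;

        spin_lock_irqsave(&fp->lock, flags);
        foo_enable_rx_irq(fp);
        spin_unlock_irqrestore(&fp->lock, flags);
    }
    return work_done;
}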
@@ -1008,7 +1008,7 @@ static int greth_poll(struct napi_struct *napi, int budget)
             spin_unlock_irqrestore(&greth->devlock, flags);
             goto restart_txrx_poll;
         } else {
-            __napi_complete(napi);
+            napi_complete_done(napi, work_done);
             spin_unlock_irqrestore(&greth->devlock, flags);
         }
     }
...
@@ -695,125 +695,105 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
     void __iomem *mmio = lp->mmio;
     struct sk_buff *skb,*new_skb;
     int min_pkt_len, status;
-    unsigned int intr0;
     int num_rx_pkt = 0;
     short pkt_len;
 #if AMD8111E_VLAN_TAG_USED
     short vtag;
 #endif
-    int rx_pkt_limit = budget;
-    unsigned long flags;

-    if (rx_pkt_limit <= 0)
-        goto rx_not_empty;
-
-    do{
-        /* process receive packets until we use the quota.
-         * If we own the next entry, it's a new packet. Send it up.
-         */
-        while(1) {
-            status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
-            if (status & OWN_BIT)
-                break;
+    while (num_rx_pkt < budget) {
+        status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
+        if (status & OWN_BIT)
+            break;

         /* There is a tricky error noted by John Murphy,
          * <murf@perftech.com> to Russ Nelson: Even with
          * full-sized * buffers it's possible for a
          * jabber packet to use two buffers, with only
          * the last correctly noting the error.
          */
-        if(status & ERR_BIT) {
+        if (status & ERR_BIT) {
             /* resetting flags */
             lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
             goto err_next_pkt;
         }
         /* check for STP and ENP */
-        if(!((status & STP_BIT) && (status & ENP_BIT))){
+        if (!((status & STP_BIT) && (status & ENP_BIT))) {
             /* resetting flags */
             lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
             goto err_next_pkt;
         }
         pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;

 #if AMD8111E_VLAN_TAG_USED
         vtag = status & TT_MASK;
-        /*MAC will strip vlan tag*/
+        /* MAC will strip vlan tag */
         if (vtag != 0)
-            min_pkt_len =MIN_PKT_LEN - 4;
+            min_pkt_len = MIN_PKT_LEN - 4;
         else
 #endif
-            min_pkt_len =MIN_PKT_LEN;
+            min_pkt_len = MIN_PKT_LEN;

         if (pkt_len < min_pkt_len) {
             lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
             lp->drv_rx_errors++;
             goto err_next_pkt;
         }
-        if(--rx_pkt_limit < 0)
-            goto rx_not_empty;
         new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
         if (!new_skb) {
             /* if allocation fail,
              * ignore that pkt and go to next one
              */
             lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
             lp->drv_rx_errors++;
             goto err_next_pkt;
         }

         skb_reserve(new_skb, 2);
         skb = lp->rx_skbuff[rx_index];
         pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
                  lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
         skb_put(skb, pkt_len);
         lp->rx_skbuff[rx_index] = new_skb;
         lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
                                new_skb->data,
                                lp->rx_buff_len-2,
                                PCI_DMA_FROMDEVICE);

         skb->protocol = eth_type_trans(skb, dev);

 #if AMD8111E_VLAN_TAG_USED
         if (vtag == TT_VLAN_TAGGED){
             u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
             __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
         }
 #endif
-        netif_receive_skb(skb);
-        /*COAL update rx coalescing parameters*/
+        napi_gro_receive(napi, skb);
+        /* COAL update rx coalescing parameters */
         lp->coal_conf.rx_packets++;
         lp->coal_conf.rx_bytes += pkt_len;
         num_rx_pkt++;

 err_next_pkt:
         lp->rx_ring[rx_index].buff_phy_addr
             = cpu_to_le32(lp->rx_dma_addr[rx_index]);
         lp->rx_ring[rx_index].buff_count =
             cpu_to_le16(lp->rx_buff_len-2);
         wmb();
         lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
         rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
     }
-    /* Check the interrupt status register for more packets in the
-     * mean time. Process them since we have not used up our quota.
-     */
-    intr0 = readl(mmio + INT0);
-    /*Ack receive packets */
-    writel(intr0 & RINT0,mmio + INT0);
-
-    } while(intr0 & RINT0);

-    if (rx_pkt_limit > 0) {
+    if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) {
+        unsigned long flags;
+
         /* Receive descriptor is empty now */
         spin_lock_irqsave(&lp->lock, flags);
-        __napi_complete(napi);
         writel(VAL0|RINTEN0, mmio + INTEN0);
         writel(VAL2 | RDMD0, mmio + CMD0);
         spin_unlock_irqrestore(&lp->lock, flags);
     }
-
-rx_not_empty:
     return num_rx_pkt;
 }
...
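
The amd8111e conversion above is the most invasive of the series: the old code looped on the INT0 status register until the hardware reported no pending RX work, effectively ignoring the NAPI budget, while the new code processes at most budget descriptors and relies on the core to reschedule it whenever it returns the full budget. Reduced to a skeleton (the ring helpers below are hypothetical, not the driver's real functions), the new control flow is:

/* Consume at most 'budget' descriptors. Returning 'budget' keeps the
 * NAPI instance scheduled, so the poll routine no longer needs to
 * re-read interrupt status to decide whether to loop again.
 */
static int rx_poll_skeleton(struct napi_struct *napi, int budget)
{
    int num_rx_pkt = 0;

    while (num_rx_pkt < budget && ring_has_packet(napi)) {  /* hypothetical */
        ring_consume_one(napi);                             /* hypothetical */
        num_rx_pkt++;
    }

    if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt))
        rx_irq_enable(napi);                                /* hypothetical */

    return num_rx_pkt;
}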
@@ -1350,13 +1350,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
             pcnet32_restart(dev, CSR0_START);
             netif_wake_queue(dev);
         }
-    spin_unlock_irqrestore(&lp->lock, flags);
-
-    if (work_done < budget) {
-        spin_lock_irqsave(&lp->lock, flags);
-
-        __napi_complete(napi);

+    if (work_done < budget && napi_complete_done(napi, work_done)) {
         /* clear interrupt masks */
         val = lp->a->read_csr(ioaddr, CSR3);
         val &= 0x00ff;
@@ -1364,9 +1359,9 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)

         /* Set interrupt enable. */
         lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
-
-        spin_unlock_irqrestore(&lp->lock, flags);
     }
+
+    spin_unlock_irqrestore(&lp->lock, flags);
+
     return work_done;
 }
...
@@ -228,9 +228,10 @@ static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
     pr_info("mdio write timed out\n");
 }

-static int ep93xx_rx(struct net_device *dev, int processed, int budget)
+static int ep93xx_rx(struct net_device *dev, int budget)
 {
     struct ep93xx_priv *ep = netdev_priv(dev);
+    int processed = 0;

     while (processed < budget) {
         int entry;
@@ -294,7 +295,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
         skb_put(skb, length);
         skb->protocol = eth_type_trans(skb, dev);
-        netif_receive_skb(skb);
+        napi_gro_receive(&ep->napi, skb);

         dev->stats.rx_packets++;
         dev->stats.rx_bytes += length;
@@ -310,35 +311,17 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
     return processed;
 }

-static int ep93xx_have_more_rx(struct ep93xx_priv *ep)
-{
-    struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer;
-
-    return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP));
-}
-
 static int ep93xx_poll(struct napi_struct *napi, int budget)
 {
     struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
     struct net_device *dev = ep->dev;
-    int rx = 0;
+    int rx;

-poll_some_more:
-    rx = ep93xx_rx(dev, rx, budget);
-    if (rx < budget) {
-        int more = 0;
-
+    rx = ep93xx_rx(dev, budget);
+    if (rx < budget && napi_complete_done(napi, rx)) {
         spin_lock_irq(&ep->rx_lock);
-        __napi_complete(napi);
         wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
-        if (ep93xx_have_more_rx(ep)) {
-            wrl(ep, REG_INTEN, REG_INTEN_TX);
-            wrl(ep, REG_INTSTSP, REG_INTSTS_RX);
-            more = 1;
-        }
         spin_unlock_irq(&ep->rx_lock);
-
-        if (more && napi_reschedule(napi))
-            goto poll_some_more;
     }

     if (rx) {
...
@@ -421,20 +421,20 @@ static int mal_poll(struct napi_struct *napi, int budget)
         int n;

         if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
             continue;
-        n = mc->ops->poll_rx(mc->dev, budget);
+        n = mc->ops->poll_rx(mc->dev, budget - received);
         if (n) {
             received += n;
-            budget -= n;
-            if (budget <= 0)
-                goto more_work; // XXX What if this is the last one ?
+            if (received >= budget)
+                return budget;
         }
     }

-    /* We need to disable IRQs to protect from RXDE IRQ here */
-    spin_lock_irqsave(&mal->lock, flags);
-    __napi_complete(napi);
-    mal_enable_eob_irq(mal);
-    spin_unlock_irqrestore(&mal->lock, flags);
+    if (napi_complete_done(napi, received)) {
+        /* We need to disable IRQs to protect from RXDE IRQ here */
+        spin_lock_irqsave(&mal->lock, flags);
+        mal_enable_eob_irq(mal);
+        spin_unlock_irqrestore(&mal->lock, flags);
+    }

     /* Check for "rotting" packet(s) */
     list_for_each(l, &mal->poll_list) {
...
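
Beyond the napi_complete_done() switch, the mal_poll() hunk above fixes the budget arithmetic for a NAPI instance that services several channels: each channel may only consume what remains of the budget, and exhaustion is signalled by returning exactly budget so the core keeps the instance scheduled, replacing the old goto more_work path and its "XXX What if this is the last one ?" worry. A sketch of that accounting, with a hypothetical channel array standing in for the commac list:

#include <linux/netdevice.h>

struct chan;                                    /* hypothetical channel state */

struct bundle {
    struct napi_struct napi;
    struct chan *chans;
    int num_chans;
};

static int chan_poll_rx(struct chan *c, int budget);   /* hypothetical */
static void bundle_enable_irqs(struct bundle *b);      /* hypothetical */

static int multi_channel_poll(struct napi_struct *napi, int budget)
{
    struct bundle *b = container_of(napi, struct bundle, napi);
    int received = 0;
    int i;

    for (i = 0; i < b->num_chans; i++) {
        /* hand each channel only the unspent part of the budget */
        received += chan_poll_rx(&b->chans[i], budget - received);
        if (received >= budget)
            return budget;      /* budget exhausted: stay scheduled */
    }

    if (napi_complete_done(napi, received))
        bundle_enable_irqs(b);

    return received;
}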
@@ -3201,7 +3201,7 @@ static void skge_tx_done(struct net_device *dev)
     }
 }

-static int skge_poll(struct napi_struct *napi, int to_do)
+static int skge_poll(struct napi_struct *napi, int budget)
 {
     struct skge_port *skge = container_of(napi, struct skge_port, napi);
     struct net_device *dev = skge->netdev;
@@ -3214,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)

     skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);

-    for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
+    for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) {
         struct skge_rx_desc *rd = e->desc;
         struct sk_buff *skb;
         u32 control;
@@ -3236,12 +3236,10 @@ static int skge_poll(struct napi_struct *napi, int to_do)
     wmb();
     skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);

-    if (work_done < to_do) {
+    if (work_done < budget && napi_complete_done(napi, work_done)) {
         unsigned long flags;

-        napi_gro_flush(napi, false);
         spin_lock_irqsave(&hw->hw_lock, flags);
-        __napi_complete(napi);
         hw->intr_mask |= napimask[skge->port];
         skge_write32(hw, B0_IMSK, hw->intr_mask);
         skge_read32(hw, B0_IMSK);
...
@@ -519,7 +519,7 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget)
             /* Relinquish the SKB to the network layer */
             skb_put(skb, pktlen);
             skb->protocol = eth_type_trans(skb, ndev);
-            netif_receive_skb(skb);
+            napi_gro_receive(&ksp->napi, skb);

             /* Record stats */
             ndev->stats.rx_packets++;
@@ -561,18 +561,17 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget)
 static int ks8695_poll(struct napi_struct *napi, int budget)
 {
     struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
-    unsigned long work_done;
-
     unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
     unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
+    int work_done;

     work_done = ks8695_rx(ksp, budget);

-    if (work_done < budget) {
+    if (work_done < budget && napi_complete_done(napi, work_done)) {
         unsigned long flags;
+
         spin_lock_irqsave(&ksp->rx_lock, flags);
-        __napi_complete(napi);
-        /*enable rx interrupt*/
+        /* enable rx interrupt */
         writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
         spin_unlock_irqrestore(&ksp->rx_lock, flags);
     }
...
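
ks8695, like amd8111e, ep93xx and qla3xxx elsewhere in the series, also swaps netif_receive_skb() for napi_gro_receive(), so received frames pass through GRO and can be coalesced before they reach the stack; whatever GRO is still holding gets flushed when napi_complete_done() runs. A hedged sketch of that delivery change inside an RX loop (the dequeue helper is hypothetical):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static struct sk_buff *dequeue_completed_rx(struct net_device *ndev); /* hypothetical */

/* Frames now go through GRO; napi_gro_receive() takes the napi
 * context so the core knows which per-instance GRO state to use.
 */
static int rx_deliver(struct napi_struct *napi, struct net_device *ndev,
                      int budget)
{
    struct sk_buff *skb;
    int done = 0;

    while (done < budget && (skb = dequeue_completed_rx(ndev)) != NULL) {
        skb->protocol = eth_type_trans(skb, ndev);
        napi_gro_receive(napi, skb);    /* was: netif_receive_skb(skb) */
        done++;
    }
    return done;
}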
@@ -2025,7 +2025,7 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
     skb_checksum_none_assert(skb);
     skb->protocol = eth_type_trans(skb, qdev->ndev);

-    netif_receive_skb(skb);
+    napi_gro_receive(&qdev->napi, skb);
     lrg_buf_cb2->skb = NULL;

     if (qdev->device_id == QL3022_DEVICE_ID)
@@ -2095,7 +2095,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
     }
     skb2->protocol = eth_type_trans(skb2, qdev->ndev);

-    netif_receive_skb(skb2);
+    napi_gro_receive(&qdev->napi, skb2);
     ndev->stats.rx_packets++;
     ndev->stats.rx_bytes += length;
     lrg_buf_cb2->skb = NULL;
@@ -2105,8 +2105,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
     ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
 }

-static int ql_tx_rx_clean(struct ql3_adapter *qdev,
-              int *tx_cleaned, int *rx_cleaned, int work_to_do)
+static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget)
 {
     struct net_rsp_iocb *net_rsp;
     struct net_device *ndev = qdev->ndev;
@@ -2114,7 +2113,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
     /* While there are entries in the completion queue. */
     while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
-        qdev->rsp_consumer_index) && (work_done < work_to_do)) {
+        qdev->rsp_consumer_index) && (work_done < budget)) {

         net_rsp = qdev->rsp_current;
         rmb();
@@ -2130,21 +2129,20 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
         case OPCODE_OB_MAC_IOCB_FN2:
             ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
                            net_rsp);
-            (*tx_cleaned)++;
             break;

         case OPCODE_IB_MAC_IOCB:
         case OPCODE_IB_3032_MAC_IOCB:
             ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
                            net_rsp);
-            (*rx_cleaned)++;
+            work_done++;
             break;

         case OPCODE_IB_IP_IOCB:
         case OPCODE_IB_3032_IP_IOCB:
             ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
                          net_rsp);
-            (*rx_cleaned)++;
+            work_done++;
             break;

         default: {
             u32 *tmp = (u32 *)net_rsp;
@@ -2169,7 +2167,6 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
         }

         qdev->rsp_current++;
-        work_done = *tx_cleaned + *rx_cleaned;
     }

     return work_done;
@@ -2178,25 +2175,25 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 static int ql_poll(struct napi_struct *napi, int budget)
 {
     struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
-    int rx_cleaned = 0, tx_cleaned = 0;
-    unsigned long hw_flags;
     struct ql3xxx_port_registers __iomem *port_regs =
         qdev->mem_map_registers;
+    int work_done;

-    ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
+    work_done = ql_tx_rx_clean(qdev, budget);

-    if (tx_cleaned + rx_cleaned != budget) {
-        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-        __napi_complete(napi);
+    if (work_done < budget && napi_complete_done(napi, work_done)) {
+        unsigned long flags;
+
+        spin_lock_irqsave(&qdev->hw_lock, flags);
         ql_update_small_bufq_prod_index(qdev);
         ql_update_lrg_bufq_prod_index(qdev);
         writel(qdev->rsp_consumer_index,
                &port_regs->CommonRegs.rspQConsumerIndex);
-        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+        spin_unlock_irqrestore(&qdev->hw_lock, flags);

         ql_enable_interrupts(qdev);
     }
-    return tx_cleaned + rx_cleaned;
+    return work_done;
 }

 static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
...
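
The ql_tx_rx_clean() rewrite also makes a NAPI convention visible that the old tx_cleaned/rx_cleaned out-parameters obscured: TX completions are reaped opportunistically and are not meant to count against the budget; only received packets advance work_done, which is now simply returned. Schematically, under hypothetical event-queue names:

struct nic;                                         /* hypothetical adapter state */
struct completion_entry;                            /* hypothetical queue entry */

static struct completion_entry *next_completion(struct nic *nic);    /* hypothetical */
static bool entry_is_tx(struct completion_entry *e);                 /* hypothetical */
static void reap_tx(struct nic *nic, struct completion_entry *e);    /* hypothetical */
static void deliver_rx(struct nic *nic, struct completion_entry *e); /* hypothetical */

/* TX completions are free; only RX events consume the budget. */
static int clean_completions(struct nic *nic, int budget)
{
    struct completion_entry *e;
    int work_done = 0;

    while (work_done < budget && (e = next_completion(nic)) != NULL) {
        if (entry_is_tx(e)) {
            reap_tx(nic, e);        /* not counted against the budget */
        } else {
            deliver_rx(nic, e);
            work_done++;
        }
    }
    return work_done;
}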
@@ -465,10 +465,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
     struct cp_private *cp = container_of(napi, struct cp_private, napi);
     struct net_device *dev = cp->dev;
     unsigned int rx_tail = cp->rx_tail;
-    int rx;
+    int rx = 0;

-    rx = 0;
-rx_status_loop:
     cpw16(IntrStatus, cp_rx_intr_mask);

     while (rx < budget) {
@@ -556,15 +554,10 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
     /* if we did not reach work limit, then we're done with
      * this round of polling
      */
-    if (rx < budget) {
+    if (rx < budget && napi_complete_done(napi, rx)) {
         unsigned long flags;

-        if (cpr16(IntrStatus) & cp_rx_intr_mask)
-            goto rx_status_loop;
-
-        napi_gro_flush(napi, false);
         spin_lock_irqsave(&cp->lock, flags);
-        __napi_complete(napi);
         cpw16_f(IntrMask, cp_intr_mask);
         spin_unlock_irqrestore(&cp->lock, flags);
     }
...
@@ -2135,14 +2135,10 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
     if (likely(RTL_R16(IntrStatus) & RxAckBits))
         work_done += rtl8139_rx(dev, tp, budget);

-    if (work_done < budget) {
+    if (work_done < budget && napi_complete_done(napi, work_done)) {
         unsigned long flags;
-        /*
-         * Order is important since data can get interrupted
-         * again when we think we are done.
-         */
+
         spin_lock_irqsave(&tp->lock, flags);
-        __napi_complete(napi);
         RTL_W16_F(IntrMask, rtl8139_intr_mask);
         spin_unlock_irqrestore(&tp->lock, flags);
     }
...
@@ -264,7 +264,6 @@ struct epic_private {
     spinlock_t lock;    /* Group with Tx control cache line. */
     spinlock_t napi_lock;
     struct napi_struct napi;
-    unsigned int reschedule_in_poll;
     unsigned int cur_tx, dirty_tx;

     unsigned int cur_rx, dirty_rx;
@@ -400,7 +399,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

     spin_lock_init(&ep->lock);
     spin_lock_init(&ep->napi_lock);
-    ep->reschedule_in_poll = 0;

     /* Bring the chip out of low-power mode. */
     ew32(GENCTL, 0x4200);
@@ -1086,13 +1084,12 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)

     handled = 1;

-    if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
+    if (status & EpicNapiEvent) {
         spin_lock(&ep->napi_lock);
         if (napi_schedule_prep(&ep->napi)) {
             epic_napi_irq_off(dev, ep);
             __napi_schedule(&ep->napi);
-        } else
-            ep->reschedule_in_poll++;
+        }
         spin_unlock(&ep->napi_lock);
     }
     status &= ~EpicNapiEvent;
@@ -1248,37 +1245,23 @@ static int epic_poll(struct napi_struct *napi, int budget)
 {
     struct epic_private *ep = container_of(napi, struct epic_private, napi);
     struct net_device *dev = ep->mii.dev;
-    int work_done = 0;
     void __iomem *ioaddr = ep->ioaddr;
+    int work_done;

-rx_action:
-
     epic_tx(dev, ep);

-    work_done += epic_rx(dev, budget);
+    work_done = epic_rx(dev, budget);

     epic_rx_err(dev, ep);

-    if (work_done < budget) {
+    if (work_done < budget && napi_complete_done(napi, work_done)) {
         unsigned long flags;
-        int more;
-
-        /* A bit baroque but it avoids a (space hungry) spin_unlock */

         spin_lock_irqsave(&ep->napi_lock, flags);

-        more = ep->reschedule_in_poll;
-        if (!more) {
-            __napi_complete(napi);
-            ew32(INTSTAT, EpicNapiEvent);
-            epic_napi_irq_on(dev, ep);
-        } else
-            ep->reschedule_in_poll--;
+        ew32(INTSTAT, EpicNapiEvent);
+        epic_napi_irq_on(dev, ep);

         spin_unlock_irqrestore(&ep->napi_lock, flags);
-
-        if (more)
-            goto rx_action;
     }

     return work_done;
...
@@ -463,7 +463,6 @@ static inline bool napi_reschedule(struct napi_struct *napi)
     return false;
 }

-bool __napi_complete(struct napi_struct *n);
 bool napi_complete_done(struct napi_struct *n, int work_done);

 /**
  * napi_complete - NAPI processing complete
...
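
With the __napi_complete() prototype gone, napi_complete_done() is the only completion primitive left in the header; napi_complete() survives as a thin inline wrapper that reports zero work done, so an idle completion flushes GRO immediately rather than deferring it. In kernels of this era the wrapper reads, abbreviated:

/* include/linux/netdevice.h (abbreviated) */
static inline bool napi_complete(struct napi_struct *n)
{
    return napi_complete_done(n, 0);
}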
@@ -4883,23 +4883,6 @@ void __napi_schedule_irqoff(struct napi_struct *n)
 }
 EXPORT_SYMBOL(__napi_schedule_irqoff);

-bool __napi_complete(struct napi_struct *n)
-{
-    BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
-
-    /* Some drivers call us directly, instead of calling
-     * napi_complete_done().
-     */
-    if (unlikely(test_bit(NAPI_STATE_IN_BUSY_POLL, &n->state)))
-        return false;
-
-    list_del_init(&n->poll_list);
-    smp_mb__before_atomic();
-    clear_bit(NAPI_STATE_SCHED, &n->state);
-    return true;
-}
-EXPORT_SYMBOL(__napi_complete);
-
 bool napi_complete_done(struct napi_struct *n, int work_done)
 {
     unsigned long flags;
@@ -4926,14 +4909,13 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
         else
             napi_gro_flush(n, false);
     }
-    if (likely(list_empty(&n->poll_list))) {
-        WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
-    } else {
+    if (unlikely(!list_empty(&n->poll_list))) {
         /* If n->poll_list is not empty, we need to mask irqs */
         local_irq_save(flags);
-        __napi_complete(n);
+        list_del_init(&n->poll_list);
         local_irq_restore(flags);
     }
+    WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
     return true;
 }
 EXPORT_SYMBOL(napi_complete_done);
...