Commit 8d5009f6 authored by Sergei Shtylyov, committed by David S. Miller

sh_eth: fold netif_msg_*() and netdev_*() calls into netif_*() invocations

Now that we call netdev_*() under netif_msg_*() checks, we can fold these into
netif_*() macro invocations.
Suggested-by: Joe Perches <joe@perches.com>
Signed-off-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent da246855
@@ -1557,8 +1557,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 		/* Unused write back interrupt */
 		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
 			ndev->stats.tx_aborted_errors++;
-			if (netif_msg_tx_err(mdp))
-				netdev_err(ndev, "Transmit Abort\n");
+			netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
 		}
 	}
@@ -1567,45 +1566,38 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
 		if (intr_status & EESR_RFRMER) {
 			/* Receive Frame Overflow int */
 			ndev->stats.rx_frame_errors++;
-			if (netif_msg_rx_err(mdp))
-				netdev_err(ndev, "Receive Abort\n");
+			netif_err(mdp, rx_err, ndev, "Receive Abort\n");
 		}
 	}
 	if (intr_status & EESR_TDE) {
 		/* Transmit Descriptor Empty int */
 		ndev->stats.tx_fifo_errors++;
-		if (netif_msg_tx_err(mdp))
-			netdev_err(ndev, "Transmit Descriptor Empty\n");
+		netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
 	}
 	if (intr_status & EESR_TFE) {
 		/* FIFO under flow */
 		ndev->stats.tx_fifo_errors++;
-		if (netif_msg_tx_err(mdp))
-			netdev_err(ndev, "Transmit FIFO Under flow\n");
+		netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
 	}
 	if (intr_status & EESR_RDE) {
 		/* Receive Descriptor Empty int */
 		ndev->stats.rx_over_errors++;
-		if (netif_msg_rx_err(mdp))
-			netdev_err(ndev, "Receive Descriptor Empty\n");
+		netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
 	}
 	if (intr_status & EESR_RFE) {
 		/* Receive FIFO Overflow int */
 		ndev->stats.rx_fifo_errors++;
-		if (netif_msg_rx_err(mdp))
-			netdev_err(ndev, "Receive FIFO Overflow\n");
+		netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
 	}
 	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
 		/* Address Error */
 		ndev->stats.tx_fifo_errors++;
-		if (netif_msg_tx_err(mdp))
-			netdev_err(ndev, "Address Error\n");
+		netif_err(mdp, tx_err, ndev, "Address Error\n");
 	}
 	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -2064,11 +2056,9 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
 	netif_stop_queue(ndev);
-	if (netif_msg_timer(mdp)) {
-		netdev_err(ndev,
-			   "transmit timed out, status %8.8x, resetting...\n",
-			   (int)sh_eth_read(ndev, EESR));
-	}
+	netif_err(mdp, timer, ndev,
+		  "transmit timed out, status %8.8x, resetting...\n",
+		  (int)sh_eth_read(ndev, EESR));
 	/* tx_errors count up */
 	ndev->stats.tx_errors++;
@@ -2103,8 +2093,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	spin_lock_irqsave(&mdp->lock, flags);
 	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
 		if (!sh_eth_txfree(ndev)) {
-			if (netif_msg_tx_queued(mdp))
-				netdev_warn(ndev, "TxFD exhausted.\n");
+			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
 			netif_stop_queue(ndev);
 			spin_unlock_irqrestore(&mdp->lock, flags);
 			return NETDEV_TX_BUSY;
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment