Commit 21828163 authored by Ayaz Abdulla, committed by Jeff Garzik

forcedeth: statistics optimization

This patch optimizes the data paths for devices that support hw counters
by removing the software-counted statistics from them.

This is the last patch of the optimization set; it also bumps the driver
version.
Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
parent 57fff698
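
For orientation: the diff below replaces per-packet software counting with an on-demand read of the nic's hardware counters in nv_get_stats(). A minimal, self-contained sketch of that pattern follows; the *_sketch names, the numeric flag values, and the empty read_hw_counters() stand-in are simplified assumptions for illustration, while the real driver keeps this state in struct fe_priv and refreshes it via nv_get_hw_stats().

/*
 * Sketch only: statistics are no longer bumped per packet in the rx/tx
 * completion paths; instead, get_stats() refreshes them from hardware
 * counters when the device advertises support for them.
 */
#include <stdint.h>

#define DEV_HAS_STATISTICS_V1	0x1000	/* flag values assumed for illustration */
#define DEV_HAS_STATISTICS_V2	0x2000

struct net_device_stats_sketch {
	unsigned long tx_bytes, tx_errors, tx_fifo_errors, tx_carrier_errors;
	unsigned long rx_errors, rx_crc_errors, rx_over_errors;
};

struct hw_stats_mirror {
	uint64_t tx_bytes, tx_errors_total, tx_fifo_errors, tx_carrier_errors;
	uint64_t rx_errors_total, rx_crc_errors, rx_over_errors;
};

struct priv_sketch {
	uint32_t driver_data;			/* device feature flags */
	struct net_device_stats_sketch stats;	/* what get_stats() hands back */
	struct hw_stats_mirror estats;		/* mirror of the hw counter block */
};

/* stand-in for nv_get_hw_stats(): the real helper reads the nic's counter registers */
static void read_hw_counters(struct priv_sketch *np) { (void)np; }

static struct net_device_stats_sketch *get_stats_sketch(struct priv_sketch *np)
{
	/* refresh from hardware only if the device has hw counters */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1 | DEV_HAS_STATISTICS_V2)) {
		read_hw_counters(np);
		np->stats.tx_bytes          = np->estats.tx_bytes;
		np->stats.tx_fifo_errors    = np->estats.tx_fifo_errors;
		np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		np->stats.rx_crc_errors     = np->estats.rx_crc_errors;
		np->stats.rx_over_errors    = np->estats.rx_over_errors;
		np->stats.rx_errors         = np->estats.rx_errors_total;
		np->stats.tx_errors         = np->estats.tx_errors_total;
	}
	return &np->stats;
}

As the hunks below show, only the error counters and tx_bytes move over to the hardware counters; tx_packets is still incremented in software for error-free transmits.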
@@ -111,6 +111,7 @@
  * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
  * 0.58: 30 Oct 2006: Added support for sideband management unit.
  * 0.59: 30 Oct 2006: Added support for recoverable error.
+ * 0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -127,7 +128,7 @@
 #else
 #define DRIVERNAPI
 #endif
-#define FORCEDETH_VERSION	"0.59"
+#define FORCEDETH_VERSION	"0.60"
 #define DRV_NAME	"forcedeth"
 #include <linux/module.h>
@@ -1351,10 +1352,19 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	/* It seems that the nic always generates interrupts and doesn't
-	 * accumulate errors internally. Thus the current values in np->stats
-	 * are already up to date.
-	 */
+	/* If the nic supports hw counters then retrieve latest values */
+	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
+		nv_get_hw_stats(dev);
+		/* copy to net_device stats */
+		np->stats.tx_bytes = np->estats.tx_bytes;
+		np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
+		np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
+		np->stats.rx_crc_errors = np->estats.rx_crc_errors;
+		np->stats.rx_over_errors = np->estats.rx_over_errors;
+		np->stats.rx_errors = np->estats.rx_errors_total;
+		np->stats.tx_errors = np->estats.tx_errors_total;
+	}
 	return &np->stats;
 }
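
nv_get_hw_stats() itself is not shown in this diff; in the driver it reads the nic's counter registers into np->estats. The rough sketch below illustrates only that step, under stated assumptions: the register indices, the clear-on-read behaviour, and the read_counter() accessor are invented for illustration and are not forcedeth's actual register layout.

/*
 * Hypothetical sketch of a hw-counter refresh step (stand-in for
 * nv_get_hw_stats()). Register indices and clear-on-read behaviour are
 * assumptions for illustration only.
 */
#include <stdint.h>

enum { CNT_TX_BYTES, CNT_TX_UNDERFLOW, CNT_RX_CRC_ERR, CNT_MAX };

struct hw_mirror {
	uint64_t tx_bytes, tx_fifo_errors, rx_crc_errors;
};

/* stand-in for reading a memory-mapped counter register */
static uint32_t read_counter(const volatile uint32_t *ioaddr, int idx)
{
	return ioaddr[idx];
}

static void refresh_hw_counters(struct hw_mirror *estats,
                                const volatile uint32_t *ioaddr)
{
	/* assuming clear-on-read counters, accumulate into the mirror */
	estats->tx_bytes       += read_counter(ioaddr, CNT_TX_BYTES);
	estats->tx_fifo_errors += read_counter(ioaddr, CNT_TX_UNDERFLOW);
	estats->rx_crc_errors  += read_counter(ioaddr, CNT_RX_CRC_ERR);
}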
@@ -1944,16 +1954,8 @@ static void nv_tx_done_optimized(struct net_device *dev, int limit)
 		np->get_tx_ctx->dma = 0;
 		if (flags & NV_TX2_LASTPACKET) {
-			if (flags & NV_TX2_ERROR) {
-				if (flags & NV_TX2_UNDERFLOW)
-					np->stats.tx_fifo_errors++;
-				if (flags & NV_TX2_CARRIERLOST)
-					np->stats.tx_carrier_errors++;
-				np->stats.tx_errors++;
-			} else {
+			if (!(flags & NV_TX2_ERROR))
 				np->stats.tx_packets++;
-				np->stats.tx_bytes += np->get_tx_ctx->skb->len;
-			}
 			dev_kfree_skb_any(np->get_tx_ctx->skb);
 			np->get_tx_ctx->skb = NULL;
 		}
@@ -2290,7 +2292,6 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 			if (flags & NV_RX2_ERROR4) {
 				len = nv_getlen(dev, skb->data, len);
 				if (len < 0) {
-					np->stats.rx_errors++;
 					dev_kfree_skb(skb);
 					goto next_pkt;
 				}
@@ -2303,11 +2304,6 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 			}
 			/* the rest are hard errors */
 			else {
-				if (flags & NV_RX2_CRCERR)
-					np->stats.rx_crc_errors++;
-				if (flags & NV_RX2_OVERFLOW)
-					np->stats.rx_over_errors++;
-				np->stats.rx_errors++;
 				dev_kfree_skb(skb);
 				goto next_pkt;
 			}