Commit 14870b75 authored by Esben Haabendal, committed by David S. Miller

net: gianfar: Implement rx_missed_errors counter

Devices with RMON support have a 16-bit RDRP counter.  It provides: "Receive
dropped packets counter. Increments for frames received which are streamed
to system but are later dropped due to lack of system resources."

To handle more than 2^16 dropped packets, a carry bit in the CAR1 register is
set on overflow, so we enable an interrupt when it is set, extending the
counter to 2^64 and handling situations where lots of packets are missed
(e.g. during heavy network storms).
Signed-off-by: Esben Haabendal <esben@geanix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8da32a10
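
The arithmetic behind that extension is simple: RDRP is 16 bits wide, so each
carry event accounts for one full wrap, i.e. 65536 dropped frames. A minimal
standalone sketch of the composition (illustration only, not driver code;
hw_rdrp and carry_events are hypothetical names):

	#include <stdint.h>

	/*
	 * Illustration only: a 64-bit missed-frames total built from the
	 * 16-bit hardware RDRP value plus a software count of carry
	 * (overflow) events. Each carry event stands for one wrap of the
	 * hardware counter, i.e. 1 << 16 frames.
	 */
	static uint64_t total_rx_missed(uint16_t hw_rdrp, uint64_t carry_events)
	{
		return (uint64_t)hw_rdrp + (carry_events << 16);
	}

This is exactly the stats->rx_missed_errors = rdrp + (rdrp_offset << 16)
computation in the first hunk below.
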
@@ -289,6 +289,29 @@ static void gfar_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *s
 		stats->tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
 		stats->tx_packets += priv->tx_queue[i]->stats.tx_packets;
 	}
+
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+		struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;
+		unsigned long flags;
+		u32 rdrp, car, car_before;
+		u64 rdrp_offset;
+
+		spin_lock_irqsave(&priv->rmon_overflow.lock, flags);
+		car = gfar_read(&rmon->car1) & CAR1_C1RDR;
+		do {
+			car_before = car;
+			rdrp = gfar_read(&rmon->rdrp);
+			car = gfar_read(&rmon->car1) & CAR1_C1RDR;
+		} while (car != car_before);
+		if (car) {
+			priv->rmon_overflow.rdrp++;
+			gfar_write(&rmon->car1, car);
+		}
+		rdrp_offset = priv->rmon_overflow.rdrp;
+		spin_unlock_irqrestore(&priv->rmon_overflow.lock, flags);
+
+		stats->rx_missed_errors = rdrp + (rdrp_offset << 16);
+	}
 }
 
 /* Set the appropriate hash bit for the given addr */
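
The do/while loop above takes a consistent snapshot: if RDRP wraps between the
first CAR1 read and the RDRP read, the second CAR1 read differs and the sample
is retried, so the carry handling and the counter value always belong together.
A generic sketch of the pattern (illustration only; read_car1() and read_rdrp()
are hypothetical stand-ins for the register accesses):

	u32 car, car_before, rdrp;

	car = read_car1();		/* carry status before sampling */
	do {
		car_before = car;
		rdrp = read_rdrp();	/* 16-bit drop counter */
		car = read_car1();	/* carry status after sampling */
	} while (car != car_before);	/* retry if a wrap raced with us */
	/* (car, rdrp) is now a consistent pair */
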
@@ -379,7 +402,8 @@ static void gfar_ints_enable(struct gfar_private *priv)
 	for (i = 0; i < priv->num_grps; i++) {
 		struct gfar __iomem *regs = priv->gfargrp[i].regs;
 		/* Unmask the interrupts we look for */
-		gfar_write(&regs->imask, IMASK_DEFAULT);
+		gfar_write(&regs->imask,
+			   IMASK_DEFAULT | priv->rmon_overflow.imask);
 	}
 }
@@ -2287,7 +2311,7 @@ static irqreturn_t gfar_receive(int irq, void *grp_id)
 	if (likely(napi_schedule_prep(&grp->napi_rx))) {
 		spin_lock_irqsave(&grp->grplock, flags);
 		imask = gfar_read(&grp->regs->imask);
-		imask &= IMASK_RX_DISABLED;
+		imask &= IMASK_RX_DISABLED | grp->priv->rmon_overflow.imask;
 		gfar_write(&grp->regs->imask, imask);
 		spin_unlock_irqrestore(&grp->grplock, flags);
 		__napi_schedule(&grp->napi_rx);
@@ -2311,7 +2335,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
 	if (likely(napi_schedule_prep(&grp->napi_tx))) {
 		spin_lock_irqsave(&grp->grplock, flags);
 		imask = gfar_read(&grp->regs->imask);
-		imask &= IMASK_TX_DISABLED;
+		imask &= IMASK_TX_DISABLED | grp->priv->rmon_overflow.imask;
 		gfar_write(&grp->regs->imask, imask);
 		spin_unlock_irqrestore(&grp->grplock, flags);
 		__napi_schedule(&grp->napi_tx);
@@ -2682,6 +2706,18 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
 		}
 		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
 	}
+	if (events & IEVENT_MSRO) {
+		struct rmon_mib __iomem *rmon = &regs->rmon;
+		u32 car;
+
+		spin_lock(&priv->rmon_overflow.lock);
+		car = gfar_read(&rmon->car1) & CAR1_C1RDR;
+		if (car) {
+			priv->rmon_overflow.rdrp++;
+			gfar_write(&rmon->car1, car);
+		}
+		spin_unlock(&priv->rmon_overflow.lock);
+	}
 	if (events & IEVENT_BSY) {
 		dev->stats.rx_over_errors++;
 		atomic64_inc(&priv->extra_stats.rx_bsy);
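
This interrupt path is the counterpart of the stats read above: when the RDRP
carry raises MSRO, the software overflow count is bumped under the same lock,
and the carry bit is acknowledged by writing the set bit back (CAR1 is treated
as write-one-to-clear here). A reduced sketch (illustration only; ovf,
read_car1() and ack_car1() are hypothetical):

	spin_lock(&ovf->lock);
	car = read_car1() & CAR1_C1RDR;
	if (car) {
		ovf->rdrp++;		/* another 1 << 16 missed frames */
		ack_car1(car);		/* write the set bit back to clear it */
	}
	spin_unlock(&ovf->lock);

Taking the lock in both places keeps the stats read from double counting a
wrap that the interrupt handler is about to account for.
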
@@ -3259,6 +3295,14 @@ static int gfar_probe(struct platform_device *ofdev)
 
 	gfar_hw_init(priv);
 
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+		struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;
+
+		spin_lock_init(&priv->rmon_overflow.lock);
+		priv->rmon_overflow.imask = IMASK_MSRO;
+		gfar_write(&rmon->cam1, gfar_read(&rmon->cam1) & ~CAM1_M1RDR);
+	}
+
 	/* Carrier starts down, phylib will bring it up */
 	netif_carrier_off(dev);
@@ -663,6 +663,15 @@ struct rmon_mib
 	u32	cam2;	/* 0x.73c - Carry Mask Register Two */
 };
 
+struct rmon_overflow {
+	/* lock for synchronization of the rdrp field of this struct, and
+	 * CAR1/CAR2 registers
+	 */
+	spinlock_t lock;
+	u32	imask;
+	u64	rdrp;
+};
+
 struct gfar_extra_stats {
 	atomic64_t rx_alloc_err;
 	atomic64_t rx_large;
@@ -1150,6 +1159,7 @@ struct gfar_private {
 
 	/* Network Statistics */
 	struct gfar_extra_stats extra_stats;
+	struct rmon_overflow rmon_overflow;
 
 	/* PHY stuff */
 	phy_interface_t interface;