Commit 66812da3 authored by Tobias Klauser, committed by Greg Kroah-Hartman

staging: octeon: Use net_device_stats from struct net_device

Instead of using a private copy of struct net_device_stats in
struct octeon_ethernet, use stats from struct net_device. Also remove
the now unnecessary .ndo_get_stats function.
Signed-off-by: Tobias Klauser <tklauser@distanz.ch>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 548d3506
...@@ -356,8 +356,8 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget) ...@@ -356,8 +356,8 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
/* Increment RX stats for virtual ports */ /* Increment RX stats for virtual ports */
if (port >= CVMX_PIP_NUM_INPUT_PORTS) { if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
priv->stats.rx_packets++; dev->stats.rx_packets++;
priv->stats.rx_bytes += skb->len; dev->stats.rx_bytes += skb->len;
} }
netif_receive_skb(skb); netif_receive_skb(skb);
} else { } else {
...@@ -365,7 +365,7 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget) ...@@ -365,7 +365,7 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
* Drop any packet received for a device that * Drop any packet received for a device that
* isn't up. * isn't up.
*/ */
priv->stats.rx_dropped++; dev->stats.rx_dropped++;
dev_kfree_skb_irq(skb); dev_kfree_skb_irq(skb);
} }
} else { } else {
......
...@@ -460,7 +460,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -460,7 +460,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
case QUEUE_DROP: case QUEUE_DROP:
skb->next = to_free_list; skb->next = to_free_list;
to_free_list = skb; to_free_list = skb;
priv->stats.tx_dropped++; dev->stats.tx_dropped++;
break; break;
case QUEUE_HW: case QUEUE_HW:
cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1); cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
...@@ -535,7 +535,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) ...@@ -535,7 +535,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!work)) { if (unlikely(!work)) {
printk_ratelimited("%s: Failed to allocate a work queue entry\n", printk_ratelimited("%s: Failed to allocate a work queue entry\n",
dev->name); dev->name);
priv->stats.tx_dropped++; dev->stats.tx_dropped++;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return 0; return 0;
} }
...@@ -546,7 +546,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) ...@@ -546,7 +546,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
printk_ratelimited("%s: Failed to allocate a packet buffer\n", printk_ratelimited("%s: Failed to allocate a packet buffer\n",
dev->name); dev->name);
cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1); cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
priv->stats.tx_dropped++; dev->stats.tx_dropped++;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return 0; return 0;
} }
...@@ -663,8 +663,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) ...@@ -663,8 +663,8 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
/* Submit the packet to the POW */ /* Submit the packet to the POW */
cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type, cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work)); cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
priv->stats.tx_packets++; dev->stats.tx_packets++;
priv->stats.tx_bytes += skb->len; dev->stats.tx_bytes += skb->len;
dev_consume_skb_any(skb); dev_consume_skb_any(skb);
return 0; return 0;
} }
......
...@@ -228,17 +228,17 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) ...@@ -228,17 +228,17 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
cvmx_pko_get_port_status(priv->port, 1, &tx_status); cvmx_pko_get_port_status(priv->port, 1, &tx_status);
} }
priv->stats.rx_packets += rx_status.inb_packets; dev->stats.rx_packets += rx_status.inb_packets;
priv->stats.tx_packets += tx_status.packets; dev->stats.tx_packets += tx_status.packets;
priv->stats.rx_bytes += rx_status.inb_octets; dev->stats.rx_bytes += rx_status.inb_octets;
priv->stats.tx_bytes += tx_status.octets; dev->stats.tx_bytes += tx_status.octets;
priv->stats.multicast += rx_status.multicast_packets; dev->stats.multicast += rx_status.multicast_packets;
priv->stats.rx_crc_errors += rx_status.inb_errors; dev->stats.rx_crc_errors += rx_status.inb_errors;
priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets; dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
priv->stats.rx_dropped += rx_status.dropped_packets; dev->stats.rx_dropped += rx_status.dropped_packets;
} }
return &priv->stats; return &dev->stats;
} }
/** /**
......
...@@ -38,8 +38,6 @@ struct octeon_ethernet { ...@@ -38,8 +38,6 @@ struct octeon_ethernet {
int imode; int imode;
/* List of outstanding tx buffers per queue */ /* List of outstanding tx buffers per queue */
struct sk_buff_head tx_free_list[16]; struct sk_buff_head tx_free_list[16];
/* Device statistics */
struct net_device_stats stats;
unsigned int last_speed; unsigned int last_speed;
unsigned int last_link; unsigned int last_link;
/* Last negotiated link state */ /* Last negotiated link state */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment