Commit ce45b873 authored by Breno Leitao's avatar Breno Leitao Committed by David S. Miller

ehea: Fixing statistics

(Applied over Eric's "ehea: fix use after free" patch)

Currently the ehea stats are broken. The byte counters are read from
the hardware, while the packet counters are kept by the device
driver. Also, the device-driver counters are reset during the
down process while the hardware counters are not, causing some weird
numbers.

This patch just consolidates the packets and bytes on the device
driver.
Signed-off-by: Breno Leitao <leitao@linux.vnet.ibm.com>
Reviewed-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a71fb881
@@ -396,7 +396,9 @@ struct ehea_port_res {
 	int swqe_ll_count;
 	u32 swqe_id_counter;
 	u64 tx_packets;
+	u64 tx_bytes;
 	u64 rx_packets;
+	u64 rx_bytes;
 	u32 poll_counter;
 	struct net_lro_mgr lro_mgr;
 	struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
@@ -330,7 +330,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 	struct ehea_port *port = netdev_priv(dev);
 	struct net_device_stats *stats = &port->stats;
 	struct hcp_ehea_port_cb2 *cb2;
-	u64 hret, rx_packets, tx_packets;
+	u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
 	int i;
 
 	memset(stats, 0, sizeof(*stats));
@@ -353,18 +353,22 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
 
 	rx_packets = 0;
-	for (i = 0; i < port->num_def_qps; i++)
+	for (i = 0; i < port->num_def_qps; i++) {
 		rx_packets += port->port_res[i].rx_packets;
+		rx_bytes += port->port_res[i].rx_bytes;
+	}
 
 	tx_packets = 0;
-	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
 		tx_packets += port->port_res[i].tx_packets;
+		tx_bytes += port->port_res[i].tx_bytes;
+	}
 
 	stats->tx_packets = tx_packets;
 	stats->multicast = cb2->rxmcp;
 	stats->rx_errors = cb2->rxuerr;
-	stats->rx_bytes = cb2->rxo;
-	stats->tx_bytes = cb2->txo;
+	stats->rx_bytes = rx_bytes;
+	stats->tx_bytes = tx_bytes;
 	stats->rx_packets = rx_packets;
 
 out_herr:
@@ -703,6 +707,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
 	int skb_arr_rq2_len = pr->rq2_skba.len;
 	int skb_arr_rq3_len = pr->rq3_skba.len;
 	int processed, processed_rq1, processed_rq2, processed_rq3;
+	u64 processed_bytes = 0;
 	int wqe_index, last_wqe_index, rq, port_reset;
 
 	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
@@ -760,6 +765,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
 				processed_rq3++;
 			}
 
+			processed_bytes += skb->len;
 			ehea_proc_skb(pr, cqe, skb);
 		} else {
 			pr->p_stats.poll_receive_errors++;
@@ -775,6 +781,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
 		lro_flush_all(&pr->lro_mgr);
 
 	pr->rx_packets += processed;
+	pr->rx_bytes += processed_bytes;
 
 	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
 	ehea_refill_rq2(pr, processed_rq2);
@@ -1509,9 +1516,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 	enum ehea_eq_type eq_type = EHEA_EQ;
 	struct ehea_qp_init_attr *init_attr = NULL;
 	int ret = -EIO;
+	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
+
+	tx_bytes = pr->tx_bytes;
+	tx_packets = pr->tx_packets;
+	rx_bytes = pr->rx_bytes;
+	rx_packets = pr->rx_packets;
 
 	memset(pr, 0, sizeof(struct ehea_port_res));
 
+	pr->tx_bytes = rx_bytes;
+	pr->tx_packets = tx_packets;
+	pr->rx_bytes = rx_bytes;
+	pr->rx_packets = rx_packets;
+
 	pr->port = port;
 	spin_lock_init(&pr->xmit_lock);
 	spin_lock_init(&pr->netif_queue);

NOTE(review): `pr->tx_bytes = rx_bytes;` in this hunk looks like a typo for
`tx_bytes`; it is reproduced here exactly as merged in the quoted commit —
presumably corrected in a later upstream fix (verify against kernel history).
@@ -2254,6 +2272,9 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		swqe->vlan_tag = vlan_tx_tag_get(skb);
 	}
 
+	pr->tx_packets++;
+	pr->tx_bytes += skb->len;
+
 	if (skb->len <= SWQE3_MAX_IMM) {
 		u32 sig_iv = port->sig_comp_iv;
 		u32 swqe_num = pr->swqe_id_counter;
@@ -2295,7 +2316,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	ehea_post_swqe(pr->qp, swqe);
-	pr->tx_packets++;
 
 	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
 		spin_lock_irqsave(&pr->netif_queue, flags);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment