Commit 793e3955 authored by Stephen Hemminger, committed by David S. Miller

netvsc: account for packets/bytes transmitted after completion

Most drivers do not increment transmit statistics until after the
transmit is completed. This will also be necessary for BQL support.
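
As a hedged illustration of why deferred, per-packet totals matter for BQL:
byte-queue limits must be charged when a packet is queued and credited when
its completion arrives. A minimal sketch, assuming the standard
netdev_tx_sent_queue()/netdev_tx_completed_queue() helpers from
<linux/netdevice.h>; the surrounding function names are hypothetical and not
part of this commit:

#include <linux/netdevice.h>

/* Sketch only: charge BQL when a send is handed to the VMBus channel. */
static void sketch_bql_on_submit(struct netdev_queue *txq, unsigned int bytes)
{
	netdev_tx_sent_queue(txq, bytes);
}

/* Sketch only: credit BQL at host completion time, using the aggregated
 * packet/byte totals that this patch starts carrying in the descriptor. */
static void sketch_bql_on_complete(struct netdev_queue *txq,
				   unsigned int packets, unsigned int bytes)
{
	netdev_tx_completed_queue(txq, packets, bytes);
}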

There is slight additional complexity because the netvsc driver
aggregates multiple packets into one transmit.
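
A worked example of that aggregation accounting, written as user-space C with
hypothetical names (the driver's real rollup is the msdp->pkt hunk in
netvsc_send() below): when a pending multi-send descriptor is absorbed, its
totals fold into the absorbing packet so that one host completion credits
every aggregated skb.

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-ins for the two new hv_netvsc_packet fields. */
struct tx_totals {
	uint16_t total_packets;
	uint32_t total_bytes;
};

/* Fold a pending descriptor's totals into the packet absorbing it. */
static void fold_totals(struct tx_totals *agg, const struct tx_totals *pending)
{
	agg->total_packets += pending->total_packets;
	agg->total_bytes += pending->total_bytes;
}

int main(void)
{
	struct tx_totals a = { .total_packets = 1, .total_bytes = 1500 };
	struct tx_totals b = { .total_packets = 1, .total_bytes = 700 };

	fold_totals(&b, &a);	/* b now accounts for both sends */
	assert(b.total_packets == 2 && b.total_bytes == 2200);
	return 0;
}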
Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 46b4f7f5
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -137,8 +137,10 @@ struct hv_netvsc_packet {
 	u8 page_buf_cnt;
 
 	u16 q_idx;
-	u32 send_buf_index;
+	u16 total_packets;
 
+	u32 total_bytes;
+	u32 send_buf_index;
 	u32 total_data_buflen;
 };
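
One layout detail worth noting: hv_netvsc_packet lives in skb->cb (see the
cast in the next hunk), and skb->cb is only 48 bytes, which is presumably why
total_packets is a u16 packed next to q_idx rather than a u32; the commit
message does not state this rationale. A sketch of a compile-time guard for
that limit, not part of this commit:

#include <linux/skbuff.h>
#include "hyperv_net.h"	/* driver-private header providing hv_netvsc_packet */

/* Sketch only: fail the build if the descriptor outgrows skb->cb. */
static inline void sketch_check_cb_fit(void)
{
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
		     sizeof(((struct sk_buff *)0)->cb));
}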
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -611,15 +611,23 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
 
 	/* Notify the layer above us */
 	if (likely(skb)) {
-		struct hv_netvsc_packet *nvsc_packet
+		const struct hv_netvsc_packet *packet
 			= (struct hv_netvsc_packet *)skb->cb;
-		u32 send_index = nvsc_packet->send_buf_index;
+		u32 send_index = packet->send_buf_index;
+		struct netvsc_stats *tx_stats;
 
 		if (send_index != NETVSC_INVALID_INDEX)
 			netvsc_free_send_slot(net_device, send_index);
-		q_idx = nvsc_packet->q_idx;
+		q_idx = packet->q_idx;
 		channel = incoming_channel;
 
+		tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
+
+		u64_stats_update_begin(&tx_stats->syncp);
+		tx_stats->packets += packet->total_packets;
+		tx_stats->bytes += packet->total_bytes;
+		u64_stats_update_end(&tx_stats->syncp);
+
 		dev_consume_skb_any(skb);
 	}
@@ -924,6 +932,11 @@ int netvsc_send(struct hv_device *device,
 			packet->total_data_buflen += msd_len;
 		}
 
+		if (msdp->pkt) {
+			packet->total_packets += msdp->pkt->total_packets;
+			packet->total_bytes += msdp->pkt->total_bytes;
+		}
+
 		if (msdp->skb)
 			dev_consume_skb_any(msdp->skb);
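
The netvsc_send_tx_complete() hunk above is the writer side of the per-CPU
netvsc_stats; the syncp seqcount exists so readers on 32-bit SMP kernels
never observe a torn 64-bit counter. A hedged sketch of the matching reader,
mirroring the pattern the driver's ndo_get_stats64 path uses; the function
name here is hypothetical:

/* Sketch only: consistently snapshot one CPU's tx counters. */
static void sketch_fetch_tx_stats(const struct netvsc_stats *tx_stats,
				  u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
		*packets = tx_stats->packets;
		*bytes = tx_stats->bytes;
	} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
}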
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -364,7 +364,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	u32 rndis_msg_size;
 	struct rndis_per_packet_info *ppi;
 	u32 hash;
-	u32 skb_length;
 	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
 	struct hv_page_buffer *pb = page_buf;
 
@@ -374,7 +373,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	 * more pages we try linearizing it.
 	 */
 
-	skb_length = skb->len;
 	num_data_pgs = netvsc_get_slots(skb) + 2;
 
 	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
@@ -407,6 +405,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	packet->q_idx = skb_get_queue_mapping(skb);
 
 	packet->total_data_buflen = skb->len;
+	packet->total_bytes = skb->len;
+	packet->total_packets = 1;
 
 	rndis_msg = (struct rndis_message *)skb->head;
 
@@ -517,15 +517,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	skb_tx_timestamp(skb);
 	ret = netvsc_send(net_device_ctx->device_ctx, packet,
 			  rndis_msg, &pb, skb);
-	if (likely(ret == 0)) {
-		struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
-
-		u64_stats_update_begin(&tx_stats->syncp);
-		tx_stats->packets++;
-		tx_stats->bytes += skb_length;
-		u64_stats_update_end(&tx_stats->syncp);
+	if (likely(ret == 0))
 		return NETDEV_TX_OK;
-	}
 
 	if (ret == -EAGAIN) {
 		++net_device_ctx->eth_stats.tx_busy;