Commit cad5c197 authored by Stephen Hemminger, committed by David S. Miller

netvsc: keep track of some non-fatal overload conditions

Add ethtool statistics for the case where the send (chimney) buffer is
exhausted and the driver has to fall back to doing a scatter/gather
send. Also add a statistic for the case where the ring buffer is full
and receive completions are delayed.
Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8b532797
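
Note: the two new counters below are plain unsigned long fields in the per-device struct netvsc_ethtool_stats and reach userspace through the driver's offsetof-based ethtool stats table (extended in the last hunk). As a minimal sketch of how such a table is typically consumed by an ethtool statistics handler; all names here (struct stat_desc, example_stats, example_read_stats) are illustrative assumptions, not the driver's actual code:

/*
 * Illustrative sketch only: walk an offsetof-based table and copy each
 * unsigned long counter out of the per-device stats structure into the
 * u64 array handed back to ethtool.  offsetof() and ARRAY_SIZE() are
 * the standard kernel macros; struct netvsc_ethtool_stats is the
 * structure extended by this patch.
 */
struct stat_desc {
	const char *name;	/* label shown by "ethtool -S" */
	size_t offset;		/* byte offset into struct netvsc_ethtool_stats */
};

static const struct stat_desc example_stats[] = {
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
};

static void example_read_stats(const struct netvsc_ethtool_stats *es, u64 *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_stats); i++)
		data[i] = *(const unsigned long *)((const char *)es +
						   example_stats[i].offset);
}
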
@@ -680,6 +680,8 @@ struct netvsc_ethtool_stats {
 	unsigned long tx_no_space;
 	unsigned long tx_too_big;
 	unsigned long tx_busy;
+	unsigned long tx_send_full;
+	unsigned long rx_comp_busy;
 };
 
 struct netvsc_vf_pcpu_stats {
...
@@ -883,7 +883,9 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 	} else if (pktlen + net_device->pkt_align <
 		   net_device->send_section_size) {
 		section_index = netvsc_get_next_send_section(net_device);
-		if (section_index != NETVSC_INVALID_INDEX) {
+		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
+			++ndev_ctx->eth_stats.tx_send_full;
+		} else {
 			move_pkt_msd(&msd_send, &msd_skb, msdp);
 			msd_len = 0;
 		}
@@ -949,9 +951,10 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 }
 
 /* Send pending recv completions */
-static int send_recv_completions(struct netvsc_channel *nvchan)
+static int send_recv_completions(struct net_device *ndev,
+				 struct netvsc_device *nvdev,
+				 struct netvsc_channel *nvchan)
 {
-	struct netvsc_device *nvdev = nvchan->net_device;
 	struct multi_recv_comp *mrc = &nvchan->mrc;
 	struct recv_comp_msg {
 		struct nvsp_message_header hdr;
@@ -969,8 +972,12 @@ static int send_recv_completions(struct netvsc_channel *nvchan)
 		msg.status = rcd->status;
 		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
 				       rcd->tid, VM_PKT_COMP, 0);
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			struct net_device_context *ndev_ctx = netdev_priv(ndev);
+
+			++ndev_ctx->eth_stats.rx_comp_busy;
 			return ret;
+		}
 
 		if (++mrc->first == nvdev->recv_completion_cnt)
 			mrc->first = 0;
@@ -1011,7 +1018,7 @@ static void enq_receive_complete(struct net_device *ndev,
 	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
 
 	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
-		send_recv_completions(nvchan);
+		send_recv_completions(ndev, nvdev, nvchan);
 		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
 	}
 
@@ -1194,7 +1201,7 @@ int netvsc_poll(struct napi_struct *napi, int budget)
 	 * then re-enable host interrupts
	 * and reschedule if ring is not empty.
	 */
-	if (send_recv_completions(nvchan) == 0 &&
+	if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
 	    work_done < budget &&
 	    napi_complete_done(napi, work_done) &&
 	    hv_end_read(&channel->inbound)) {
...
@@ -1112,6 +1112,8 @@ static const struct {
 	{ "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
 	{ "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
 	{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
+	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
+	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
 }, vf_stats[] = {
 	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
 	{ "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
...
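
With this patch applied, tx_send_full and rx_comp_busy appear alongside tx_no_space, tx_too_big and tx_busy in the per-device output of "ethtool -S <interface>". A rising tx_send_full means sends are falling back to scatter/gather because no send-buffer section was free; a rising rx_comp_busy means a receive-completion message could not be posted to the host ring buffer and is delayed. Both are the non-fatal overload conditions referred to in the commit message.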