Commit 3b5dca26 authored by Jacob Keller's avatar Jacob Keller Committed by Jeff Kirsher

ixgbevf: add BP_EXTENDED_STATS for CONFIG_NET_RX_BUSY_POLL

This patch adds the extended statistics similar to the ixgbe driver. These
statistics keep track of how often the busy polling yields, as well as how many
packets are cleaned or missed by the polling routine.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent c777cdfa
...@@ -74,6 +74,14 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = { ...@@ -74,6 +74,14 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
zero_base)}, zero_base)},
{"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base, {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
zero_base)}, zero_base)},
#ifdef BP_EXTENDED_STATS
	/* Busy-poll (CONFIG_NET_RX_BUSY_POLL) extended statistics:
	 * per-direction counts of lock yields, packets cleaned by the
	 * busy-poll path, and polls that found no work ("misses").
	 * Values come from the adapter-wide bp_* fields, which are
	 * aggregated from the per-ring counters in
	 * ixgbevf_get_ethtool_stats().
	 */
	{"rx_bp_poll_yield", IXGBEVF_STAT(bp_rx_yields, zero_base, zero_base)},
	{"rx_bp_cleaned", IXGBEVF_STAT(bp_rx_cleaned, zero_base, zero_base)},
	{"rx_bp_misses", IXGBEVF_STAT(bp_rx_missed, zero_base, zero_base)},
	{"tx_bp_napi_yield", IXGBEVF_STAT(bp_tx_yields, zero_base, zero_base)},
	{"tx_bp_cleaned", IXGBEVF_STAT(bp_tx_cleaned, zero_base, zero_base)},
	{"tx_bp_misses", IXGBEVF_STAT(bp_tx_missed, zero_base, zero_base)},
#endif
}; };
#define IXGBE_QUEUE_STATS_LEN 0 #define IXGBE_QUEUE_STATS_LEN 0
...@@ -391,6 +399,30 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, ...@@ -391,6 +399,30 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
{ {
struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev);
int i; int i;
#ifdef BP_EXTENDED_STATS
	u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
	    tx_yields = 0, tx_cleaned = 0, tx_missed = 0;

	/* Fold the per-ring busy-poll counters into the adapter-wide
	 * totals that back the rx/tx_bp_* ethtool strings.
	 *
	 * Fix: the original loops accumulated bp_yields twice per ring
	 * (copy-paste error) and never read bp_misses, so the *_bp_misses
	 * stats always reported 0 and the yield counts were doubled.
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_yields += adapter->rx_ring[i].bp_yields;
		rx_cleaned += adapter->rx_ring[i].bp_cleaned;
		rx_missed += adapter->rx_ring[i].bp_misses;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		tx_yields += adapter->tx_ring[i].bp_yields;
		tx_cleaned += adapter->tx_ring[i].bp_cleaned;
		tx_missed += adapter->tx_ring[i].bp_misses;
	}

	adapter->bp_rx_yields = rx_yields;
	adapter->bp_rx_cleaned = rx_cleaned;
	adapter->bp_rx_missed = rx_missed;

	adapter->bp_tx_yields = tx_yields;
	adapter->bp_tx_cleaned = tx_cleaned;
	adapter->bp_tx_missed = tx_missed;
#endif
ixgbevf_update_stats(adapter); ixgbevf_update_stats(adapter);
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
......
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#ifdef CONFIG_NET_RX_BUSY_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h> #include <net/busy_poll.h>
#define BP_EXTENDED_STATS
#endif #endif
/* wrapper around a pointer to a socket buffer, /* wrapper around a pointer to a socket buffer,
...@@ -80,6 +81,11 @@ struct ixgbevf_ring { ...@@ -80,6 +81,11 @@ struct ixgbevf_ring {
struct u64_stats_sync syncp; struct u64_stats_sync syncp;
u64 hw_csum_rx_error; u64 hw_csum_rx_error;
u64 hw_csum_rx_good; u64 hw_csum_rx_good;
#ifdef BP_EXTENDED_STATS
	/* Per-ring busy-poll counters (see BP_EXTENDED_STATS):
	 * bp_yields  - lock contention forced NAPI/poll to yield
	 * bp_misses  - busy polls that cleaned nothing
	 * bp_cleaned - packets cleaned via the busy-poll path
	 */
	u64 bp_yields;
	u64 bp_misses;
	u64 bp_cleaned;
#endif
u16 head; u16 head;
u16 tail; u16 tail;
...@@ -181,6 +187,9 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) ...@@ -181,6 +187,9 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI); WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD; q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
rc = false; rc = false;
#ifdef BP_EXTENDED_STATS
	/* NAPI had to yield because busy poll holds the q_vector lock.
	 * NOTE(review): dereferences q_vector->tx.ring unconditionally —
	 * confirm every q_vector has at least one Tx ring, otherwise this
	 * is a NULL dereference for Rx-only vectors.
	 */
	q_vector->tx.ring->bp_yields++;
#endif
} else { } else {
/* we don't care if someone yielded */ /* we don't care if someone yielded */
q_vector->state = IXGBEVF_QV_STATE_NAPI; q_vector->state = IXGBEVF_QV_STATE_NAPI;
...@@ -213,6 +222,9 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) ...@@ -213,6 +222,9 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
if ((q_vector->state & IXGBEVF_QV_LOCKED)) { if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
rc = false; rc = false;
#ifdef BP_EXTENDED_STATS
	/* Busy poll had to yield because NAPI holds the q_vector lock.
	 * NOTE(review): dereferences q_vector->rx.ring unconditionally —
	 * confirm every q_vector has at least one Rx ring, otherwise this
	 * is a NULL dereference for Tx-only vectors.
	 */
	q_vector->rx.ring->bp_yields++;
#endif
} else { } else {
/* preserve yield marks */ /* preserve yield marks */
q_vector->state |= IXGBEVF_QV_STATE_POLL; q_vector->state |= IXGBEVF_QV_STATE_POLL;
...@@ -358,6 +370,16 @@ struct ixgbevf_adapter { ...@@ -358,6 +370,16 @@ struct ixgbevf_adapter {
unsigned int tx_ring_count; unsigned int tx_ring_count;
unsigned int rx_ring_count; unsigned int rx_ring_count;
#ifdef BP_EXTENDED_STATS
	/* Adapter-wide busy-poll totals exposed through ethtool
	 * (ixgbe_gstrings_stats); recomputed from the per-ring
	 * bp_* counters in ixgbevf_get_ethtool_stats().
	 */
	u64 bp_rx_yields;
	u64 bp_rx_cleaned;
	u64 bp_rx_missed;

	u64 bp_tx_yields;
	u64 bp_tx_cleaned;
	u64 bp_tx_missed;
#endif
u32 link_speed; u32 link_speed;
bool link_up; bool link_up;
......
...@@ -648,6 +648,12 @@ static int ixgbevf_busy_poll_recv(struct napi_struct *napi) ...@@ -648,6 +648,12 @@ static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
ixgbevf_for_each_ring(ring, q_vector->rx) { ixgbevf_for_each_ring(ring, q_vector->rx) {
found = ixgbevf_clean_rx_irq(q_vector, ring, 4); found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
	/* Credit packets cleaned by this busy poll; an empty poll counts
	 * as a miss.  NOTE(review): "bp_cleaned += found" assumes
	 * ixgbevf_clean_rx_irq() returns the number of packets cleaned —
	 * if it returns a bool, this adds at most 1 per ring; confirm the
	 * return type.
	 */
	if (found)
		ring->bp_cleaned += found;
	else
		ring->bp_misses++;
#endif
if (found) if (found)
break; break;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment