Commit 2a35efe5 authored by Emil Tantilov, committed by Jeff Kirsher

ixgbevf: add counters for Rx page allocations

We already had placeholders for failed page and buffer allocations.
Added alloc_rx_page and made sure the stats are properly updated and
exposed in ethtool.
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 35074d69
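
Once applied, the new alloc_rx_page counter appears next to the existing allocation-failure counters in the standard `ethtool -S <interface>` output for the VF netdev. A small, self-contained sketch of the aggregation pattern used in ixgbevf_update_stats() follows the diff below.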
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -75,6 +75,9 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
 	IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
 	IXGBEVF_NETDEV_STAT(multicast),
 	IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
+	IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
+	IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
+	IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
 };
 
 #define IXGBEVF_QUEUE_STATS_LEN ( \
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -84,6 +84,7 @@ struct ixgbevf_tx_queue_stats {
 struct ixgbevf_rx_queue_stats {
 	u64 alloc_rx_page_failed;
 	u64 alloc_rx_buff_failed;
+	u64 alloc_rx_page;
 	u64 csum_err;
 };
@@ -295,8 +296,9 @@ struct ixgbevf_adapter {
 	u64 hw_csum_rx_error;
 	u64 hw_rx_no_dma_resources;
 	int num_msix_vectors;
-	u32 alloc_rx_page_failed;
-	u32 alloc_rx_buff_failed;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
+	u64 alloc_rx_page;
 	struct msix_entry *msix_entries;
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -604,7 +604,7 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
 	if (dma_mapping_error(rx_ring->dev, dma)) {
 		__free_page(page);
 
-		rx_ring->rx_stats.alloc_rx_buff_failed++;
+		rx_ring->rx_stats.alloc_rx_page_failed++;
 		return false;
 	}
@@ -612,6 +612,7 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
 	bi->page = page;
 	bi->page_offset = 0;
 	bi->pagecnt_bias = 1;
+	rx_ring->rx_stats.alloc_rx_page++;
 
 	return true;
 }
@@ -963,8 +964,10 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
 
 		/* exit if we failed to retrieve a buffer */
-		if (!skb)
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
 			break;
+		}
 
 		cleaned_count++;
@@ -2749,6 +2752,8 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+	u64 alloc_rx_page = 0, hw_csum_rx_error = 0;
 	int i;
 
 	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
@@ -2769,10 +2774,18 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 				adapter->stats.vfmprc);
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->hw_csum_rx_error +=
-			adapter->rx_ring[i]->hw_csum_rx_error;
-		adapter->rx_ring[i]->hw_csum_rx_error = 0;
+		struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
+
+		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
+		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+		alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
 	}
+
+	adapter->hw_csum_rx_error = hw_csum_rx_error;
+	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+	adapter->alloc_rx_page = alloc_rx_page;
 }
 
 /**
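
For context, the ixgbevf_update_stats() hunk above follows a common driver pattern: each Rx ring keeps its own u64 counters, and the periodic stats refresh sums them into adapter-wide totals through local accumulators, assigning (rather than incrementing) the totals so a refresh never double-counts a ring and the per-ring counters never need to be reset. Below is a minimal userspace sketch of that pattern; the struct layout, field names, and fixed ring count are illustrative stand-ins, not the driver's actual definitions.

#include <stdint.h>
#include <stdio.h>

#define NUM_RX_QUEUES 4	/* illustrative; the driver sizes this at runtime */

struct rx_queue_stats {
	uint64_t alloc_rx_page;		/* successful page allocations */
	uint64_t alloc_rx_page_failed;	/* page alloc or DMA-map failures */
	uint64_t alloc_rx_buff_failed;	/* buffer retrieval failures */
};

struct adapter {
	struct rx_queue_stats rx_ring[NUM_RX_QUEUES];
	uint64_t alloc_rx_page;
	uint64_t alloc_rx_page_failed;
	uint64_t alloc_rx_buff_failed;
};

/* Sum per-ring counters into locals first, then publish once.
 * Assigning the totals (instead of adding to them) means a refresh
 * never double-counts a ring. */
static void update_stats(struct adapter *a)
{
	uint64_t page = 0, page_failed = 0, buff_failed = 0;
	int i;

	for (i = 0; i < NUM_RX_QUEUES; i++) {
		page        += a->rx_ring[i].alloc_rx_page;
		page_failed += a->rx_ring[i].alloc_rx_page_failed;
		buff_failed += a->rx_ring[i].alloc_rx_buff_failed;
	}

	a->alloc_rx_page        = page;
	a->alloc_rx_page_failed = page_failed;
	a->alloc_rx_buff_failed = buff_failed;
}

int main(void)
{
	struct adapter a = { 0 };

	/* pretend two rings have refilled some pages */
	a.rx_ring[0].alloc_rx_page = 10;
	a.rx_ring[1].alloc_rx_page = 32;

	update_stats(&a);
	printf("alloc_rx_page total: %llu\n",
	       (unsigned long long)a.alloc_rx_page);	/* prints 42 */
	return 0;
}

Widening the adapter-level counters from u32 to u64 in the header hunk fits the same pattern: the per-ring fields were already u64, so the totals must match to avoid truncation on long-running interfaces.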