Commit b76bc129 authored by Joe Damato, committed by Tony Nguyen

i40e: Add a stat for tracking busy rx pages

In some cases, a page cannot be reused by i40e because it is still busy. Add
a counter for this event.

Busy page count is accessible via ethtool.
Signed-off-by: Joe Damato <jdamato@fastly.com>
Tested-by: Dave Switzer <david.switzer@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent cb963b98
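
As noted above, the new counter is exported through ethtool's statistics interface alongside the existing rx_cache_* counters; on a kernel carrying this patch it should appear as "rx_cache_busy" in the output of "ethtool -S <interface>" (the interface name is a placeholder for whichever i40e port is being inspected).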
drivers/net/ethernet/intel/i40e/i40e.h
@@ -857,6 +857,7 @@ struct i40e_vsi {
 	u64 rx_page_reuse;
 	u64 rx_page_alloc;
 	u64 rx_page_waive;
+	u64 rx_page_busy;
 
 	/* These are containers of ring pointers, allocated at run-time */
 	struct i40e_ring **rx_rings;

drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -298,6 +298,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
 	I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse),
 	I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc),
 	I40E_VSI_STAT("rx_cache_waive", rx_page_waive),
+	I40E_VSI_STAT("rx_cache_busy", rx_page_busy),
 };
 
 /* These PF_STATs might look like duplicates of some NETDEV_STATs,

drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -773,7 +773,7 @@ void i40e_update_veb_stats(struct i40e_veb *veb)
  **/
 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 {
-	u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive;
+	u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
 	struct i40e_pf *pf = vsi->back;
 	struct rtnl_link_stats64 *ons;
 	struct rtnl_link_stats64 *ns; /* netdev stats */
@@ -809,6 +809,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	rx_reuse = 0;
 	rx_alloc = 0;
 	rx_waive = 0;
+	rx_busy = 0;
 	rcu_read_lock();
 	for (q = 0; q < vsi->num_queue_pairs; q++) {
 		/* locate Tx ring */
@@ -845,6 +846,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 		rx_reuse += p->rx_stats.page_reuse_count;
 		rx_alloc += p->rx_stats.page_alloc_count;
 		rx_waive += p->rx_stats.page_waive_count;
+		rx_busy += p->rx_stats.page_busy_count;
 
 		if (i40e_enabled_xdp_vsi(vsi)) {
 			/* locate XDP ring */
@@ -875,6 +877,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	vsi->rx_page_reuse = rx_reuse;
 	vsi->rx_page_alloc = rx_alloc;
 	vsi->rx_page_waive = rx_waive;
+	vsi->rx_page_busy = rx_busy;
 
 	ns->rx_packets = rx_p;
 	ns->rx_bytes = rx_b;

drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1990,8 +1990,8 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
  * pointing to; otherwise, the DMA mapping needs to be destroyed and
  * page freed.
  *
- * rx_stats will be updated to indicate if the page was waived because it was
- * not reusable.
+ * rx_stats will be updated to indicate whether the page was waived
+ * or busy if it could not be reused.
  */
 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
                                    struct i40e_rx_queue_stats *rx_stats,
@@ -2008,13 +2008,17 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
+	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) {
+		rx_stats->page_busy_count++;
 		return false;
+	}
 #else
 #define I40E_LAST_OFFSET \
 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
-	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
+	if (rx_buffer->page_offset > I40E_LAST_OFFSET) {
+		rx_stats->page_busy_count++;
 		return false;
+	}
 #endif
 
 	/* If we have drained the page fragment pool we need to update

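The small-page branch above decides whether a page can be recycled by comparing a snapshot of its refcount (rx_buffer_pgcnt) against pagecnt_bias, the share of references the driver itself accounts for; any extra reference means some other part of the stack still owns the page, and the patch now counts that case instead of dropping it silently. The stand-alone sketch below models that check in user space under simplified assumptions; the struct, function, and values are illustrative stand-ins, not the driver's real types.

/*
 * Hypothetical user-space model of the instrumented reuse check.
 * "refcount" stands in for the rx_buffer_pgcnt snapshot and "bias"
 * for pagecnt_bias; neither is the driver's real data structure.
 */
#include <stdbool.h>
#include <stdio.h>

struct rx_stats_model {
        unsigned long long page_busy_count;
};

/* Reuse is allowed only when the bias accounts for all but one reference;
 * otherwise the page is counted as busy, mirroring the patch above.
 */
static bool can_reuse_page(unsigned int refcount, unsigned int bias,
                           struct rx_stats_model *stats)
{
        if ((refcount - bias) > 1) {
                stats->page_busy_count++;
                return false;
        }
        return true;
}

int main(void)
{
        struct rx_stats_model stats = { 0 };

        /* Driver is the sole owner: refcount 2, bias 1 -> reusable. */
        printf("case 1 reusable: %d\n", can_reuse_page(2, 1, &stats));

        /* An extra reference is still outstanding -> counted as busy. */
        printf("case 2 reusable: %d\n", can_reuse_page(3, 1, &stats));

        printf("page_busy_count: %llu\n", stats.page_busy_count);
        return 0;
}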
drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -300,6 +300,7 @@ struct i40e_rx_queue_stats {
 	u64 page_reuse_count;
 	u64 page_alloc_count;
 	u64 page_waive_count;
+	u64 page_busy_count;
 };
 
 enum i40e_ring_state_t {