Commit 4867d750 authored by David S. Miller

Merge branch 'mneta-page_pool_get_stats'

Lorenzo Bianconi says:

====================
net: mvneta: add support for page_pool_get_stats

Introduce page_pool stats ethtool APIs in order to avoid duplicating the
same code in each driver.

Changes since v4:
- rebase on top of net-next

Changes since v3:
- get rid of wrong for loop in page_pool_ethtool_stats_get()
- add API stubs when page_pool_stats are not compiled in

Changes since v2:
- remove enum list of page_pool stats in page_pool.h
- remove leftover change in mvneta.c for ethtool_stats array allocation

Changes since v1:
- move stats accounting to page_pool code
- move stats string management to page_pool code
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents edf45f00 b3fc7922
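The consumption pattern the series settles on is small enough to sketch in one
helper. The foo_* identifiers below are hypothetical (mvneta_ethtool_pp_stats()
in the diff further down is the real instance), and the sketch assumes the
driver selects PAGE_POOL_STATS in Kconfig, as mvneta does below, so that
struct page_pool_stats is available: zero-initialize one struct, let
page_pool_get_stats() accumulate each RX queue's pool into it, then let
page_pool_ethtool_stats_get() flush the totals into the ethtool u64 array.

	#include <linux/ethtool.h>
	#include <net/page_pool.h>

	/* Hypothetical driver state: one page_pool per RX queue (sketch only). */
	struct foo_port {
		struct page_pool *rx_pool[8];
		unsigned int nr_rxq;
	};

	/* Append the aggregated page_pool counters to the ethtool data array
	 * and return the advanced pointer, one u64 per entry of pp_stats[].
	 */
	static u64 *foo_fill_pp_stats(struct foo_port *port, u64 *data)
	{
		struct page_pool_stats stats = {}; /* zeroed: get_stats() accumulates */
		unsigned int i;

		for (i = 0; i < port->nr_rxq; i++)
			page_pool_get_stats(port->rx_pool[i], &stats);

		return page_pool_ethtool_stats_get(data, &stats);
	}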
@@ -62,6 +62,7 @@ config MVNETA
 	select MVMDIO
 	select PHYLINK
 	select PAGE_POOL
+	select PAGE_POOL_STATS
 	help
 	  This driver supports the network interface units in the
 	  Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
@@ -4735,6 +4735,9 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
 		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
 			memcpy(data + i * ETH_GSTRING_LEN,
 			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
+
+		data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
+		page_pool_ethtool_stats_get_strings(data);
 	}
 }
@@ -4847,6 +4850,17 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
 	}
 }
 
+static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
+{
+	struct page_pool_stats stats = {};
+	int i;
+
+	for (i = 0; i < rxq_number; i++)
+		page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
+
+	page_pool_ethtool_stats_get(data, &stats);
+}
+
 static void mvneta_ethtool_get_stats(struct net_device *dev,
 				     struct ethtool_stats *stats, u64 *data)
 {
@@ -4857,12 +4871,16 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
 	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
 		*data++ = pp->ethtool_stats[i];
+
+	mvneta_ethtool_pp_stats(pp, data);
 }
 
 static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
 {
 	if (sset == ETH_SS_STATS)
-		return ARRAY_SIZE(mvneta_statistics);
+		return ARRAY_SIZE(mvneta_statistics) +
+		       page_pool_ethtool_stats_get_count();
 
 	return -EOPNOTSUPP;
 }
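Taken together, the three mvneta hooks above follow a pattern any
page_pool-backed driver can copy. A minimal sketch of the corresponding
ethtool callbacks, reusing the hypothetical foo_* helpers from the sketch near
the top (again illustrative, not part of the patch):

	/* Hypothetical driver-private statistics, mirroring mvneta_statistics. */
	static const char foo_stat_names[][ETH_GSTRING_LEN] = {
		"rx_discard",
	};

	static int foo_get_sset_count(struct net_device *dev, int sset)
	{
		if (sset == ETH_SS_STATS)
			return ARRAY_SIZE(foo_stat_names) +
			       page_pool_ethtool_stats_get_count();
		return -EOPNOTSUPP;
	}

	static void foo_get_strings(struct net_device *dev, u32 sset, u8 *data)
	{
		int i;

		if (sset != ETH_SS_STATS)
			return;

		for (i = 0; i < ARRAY_SIZE(foo_stat_names); i++) {
			memcpy(data, foo_stat_names[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		/* The page_pool names follow the driver names, in the same
		 * order as the values written by foo_get_stats() below.
		 */
		page_pool_ethtool_stats_get_strings(data);
	}

	static void foo_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
	{
		struct foo_port *port = netdev_priv(dev);

		data[0] = 0;	/* driver-private counters would go here */
		foo_fill_pp_stats(port, data + ARRAY_SIZE(foo_stat_names));
	}

With callbacks like these wired into ethtool_ops, `ethtool -S` lists the
driver's own counters first, followed by the rx_pp_* names defined in
pp_stats[] further down.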
@@ -117,6 +117,10 @@ struct page_pool_stats {
 	struct page_pool_recycle_stats recycle_stats;
 };
 
+int page_pool_ethtool_stats_get_count(void);
+u8 *page_pool_ethtool_stats_get_strings(u8 *data);
+u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
+
 /*
  * Drivers that wish to harvest page pool stats and report them to users
  * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
@@ -124,6 +128,23 @@ struct page_pool_stats {
  */
 bool page_pool_get_stats(struct page_pool *pool,
 			 struct page_pool_stats *stats);
+
+#else
+
+static inline int page_pool_ethtool_stats_get_count(void)
+{
+	return 0;
+}
+
+static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
+{
+	return data;
+}
+
+static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
+{
+	return data;
+}
 #endif
 
 struct page_pool {
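A note on the #else branch: the stubs mean callers never need
CONFIG_PAGE_POOL_STATS ifdefs of their own. With the option disabled,
page_pool_ethtool_stats_get_count() contributes zero entries to the string
set and the strings/values helpers return their argument unchanged, so the
same driver code compiles either way and simply reports nothing extra.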
@@ -18,6 +18,7 @@
 #include <linux/page-flags.h>
 #include <linux/mm.h> /* for __put_page() */
 #include <linux/poison.h>
+#include <linux/ethtool.h>
 
 #include <trace/events/page_pool.h>
 
@@ -42,6 +43,20 @@
 		this_cpu_add(s->__stat, val);					\
 	} while (0)
 
+static const char pp_stats[][ETH_GSTRING_LEN] = {
+	"rx_pp_alloc_fast",
+	"rx_pp_alloc_slow",
+	"rx_pp_alloc_slow_ho",
+	"rx_pp_alloc_empty",
+	"rx_pp_alloc_refill",
+	"rx_pp_alloc_waive",
+	"rx_pp_recycle_cached",
+	"rx_pp_recycle_cache_full",
+	"rx_pp_recycle_ring",
+	"rx_pp_recycle_ring_full",
+	"rx_pp_recycle_released_ref",
+};
+
 bool page_pool_get_stats(struct page_pool *pool,
 			 struct page_pool_stats *stats)
 {
@@ -50,7 +65,13 @@ bool page_pool_get_stats(struct page_pool *pool,
 	if (!stats)
 		return false;
 
-	memcpy(&stats->alloc_stats, &pool->alloc_stats, sizeof(pool->alloc_stats));
+	/* The caller is responsible to initialize stats. */
+	stats->alloc_stats.fast += pool->alloc_stats.fast;
+	stats->alloc_stats.slow += pool->alloc_stats.slow;
+	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
+	stats->alloc_stats.empty += pool->alloc_stats.empty;
+	stats->alloc_stats.refill += pool->alloc_stats.refill;
+	stats->alloc_stats.waive += pool->alloc_stats.waive;
 
 	for_each_possible_cpu(cpu) {
 		const struct page_pool_recycle_stats *pcpu =
@@ -66,6 +87,46 @@ bool page_pool_get_stats(struct page_pool *pool,
 	return true;
 }
 EXPORT_SYMBOL(page_pool_get_stats);
+
+u8 *page_pool_ethtool_stats_get_strings(u8 *data)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
+		memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
+	}
+
+	return data;
+}
+EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);
+
+int page_pool_ethtool_stats_get_count(void)
+{
+	return ARRAY_SIZE(pp_stats);
+}
+EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);
+
+u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
+{
+	struct page_pool_stats *pool_stats = stats;
+
+	*data++ = pool_stats->alloc_stats.fast;
+	*data++ = pool_stats->alloc_stats.slow;
+	*data++ = pool_stats->alloc_stats.slow_high_order;
+	*data++ = pool_stats->alloc_stats.empty;
+	*data++ = pool_stats->alloc_stats.refill;
+	*data++ = pool_stats->alloc_stats.waive;
+	*data++ = pool_stats->recycle_stats.cached;
+	*data++ = pool_stats->recycle_stats.cache_full;
+	*data++ = pool_stats->recycle_stats.ring;
+	*data++ = pool_stats->recycle_stats.ring_full;
+	*data++ = pool_stats->recycle_stats.released_refcnt;
+
+	return data;
+}
+EXPORT_SYMBOL(page_pool_ethtool_stats_get);
+
 #else
 #define alloc_stat_inc(pool, __stat)
 #define recycle_stat_inc(pool, __stat)
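Two implicit invariants in the page_pool.c changes are worth calling out:
page_pool_get_stats() now accumulates into the caller's struct rather than
overwriting it (hence the zero-initialization requirement on the caller), and
the order of names in pp_stats[] must stay in lockstep with the order of
writes in page_pool_ethtool_stats_get(), or ethtool -S pairs names with the
wrong counters. A hypothetical debug helper (not part of this patch; it would
have to live in page_pool.c since pp_stats[] is file-local) makes the
positional pairing explicit:

	/* Hypothetical, for illustration only: print each pp_stats[] name next
	 * to the value page_pool_ethtool_stats_get() writes at the same index.
	 */
	static void pp_stats_dump(struct page_pool_stats *stats)
	{
		u64 vals[ARRAY_SIZE(pp_stats)];
		int i;

		page_pool_ethtool_stats_get(vals, stats);
		for (i = 0; i < ARRAY_SIZE(pp_stats); i++)
			pr_info("%s: %llu\n", pp_stats[i], vals[i]);
	}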