Commit 92df9f8a authored by David S. Miller

Merge branch 'mvneta-xdp-ethtool-stats'

Lorenzo Bianconi says:

====================
add xdp ethtool stats to mvneta driver

Rework mvneta stats accounting in order to introduce xdp ethtool
statistics in the mvneta driver.
Introduce xdp_redirect, xdp_pass, xdp_drop and xdp_tx counters to
ethtool statistics.
Fix skb_alloc_error and refill_error ethtool accounting.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c1b18f20 6c8a8cfd
......@@ -341,6 +341,10 @@ enum {
ETHTOOL_STAT_EEE_WAKEUP,
ETHTOOL_STAT_SKB_ALLOC_ERR,
ETHTOOL_STAT_REFILL_ERR,
ETHTOOL_XDP_REDIRECT,
ETHTOOL_XDP_PASS,
ETHTOOL_XDP_DROP,
ETHTOOL_XDP_TX,
ETHTOOL_MAX_STATS,
};
......@@ -354,10 +358,10 @@ struct mvneta_statistic {
#define T_REG_64 64
#define T_SW 1
#define MVNETA_XDP_PASS BIT(0)
#define MVNETA_XDP_DROPPED BIT(1)
#define MVNETA_XDP_TX BIT(2)
#define MVNETA_XDP_REDIR BIT(3)
#define MVNETA_XDP_PASS 0
#define MVNETA_XDP_DROPPED BIT(0)
#define MVNETA_XDP_TX BIT(1)
#define MVNETA_XDP_REDIR BIT(2)
static const struct mvneta_statistic mvneta_statistics[] = {
{ 0x3000, T_REG_64, "good_octets_received", },
......@@ -395,16 +399,36 @@ static const struct mvneta_statistic mvneta_statistics[] = {
{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
{ ETHTOOL_XDP_REDIRECT, T_SW, "xdp_redirect", },
{ ETHTOOL_XDP_PASS, T_SW, "xdp_pass", },
{ ETHTOOL_XDP_DROP, T_SW, "xdp_drop", },
{ ETHTOOL_XDP_TX, T_SW, "xdp_tx", },
};
/* SW counters updated on the RX/TX fast paths. Each CPU owns one copy
 * (inside mvneta_pcpu_stats) and updates it under its own
 * u64_stats_update_begin()/end() section.
 */
struct mvneta_stats {
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
	/* xdp */
	u64	xdp_redirect;
	u64	xdp_pass;
	u64	xdp_drop;
	u64	xdp_tx;
};

/* Counters exported through ethtool -S, summed over all CPUs by
 * mvneta_ethtool_update_pcpu_stats().
 */
struct mvneta_ethtool_stats {
	struct mvneta_stats ps;
	u64	skb_alloc_error;	/* build_skb() failures */
	u64	refill_error;		/* RX buffer refill failures */
};

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;

	struct mvneta_ethtool_stats es;
	/* rx_dropped/rx_errors feed ndo_get_stats64 only, so they sit
	 * outside the ethtool snapshot but under the same syncp.
	 */
	u64	rx_dropped;
	u64	rx_errors;
};
struct mvneta_pcpu_port {
......@@ -660,10 +684,6 @@ struct mvneta_rx_queue {
/* pointer to uncomplete skb buffer */
struct sk_buff *skb;
int left_size;
/* error counters */
u32 skb_alloc_err;
u32 refill_err;
};
static enum cpuhp_state online_hpstate;
......@@ -748,12 +768,12 @@ mvneta_get_stats64(struct net_device *dev,
cpu_stats = per_cpu_ptr(pp->stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
rx_packets = cpu_stats->rx_packets;
rx_bytes = cpu_stats->rx_bytes;
rx_packets = cpu_stats->es.ps.rx_packets;
rx_bytes = cpu_stats->es.ps.rx_bytes;
rx_dropped = cpu_stats->rx_dropped;
rx_errors = cpu_stats->rx_errors;
tx_packets = cpu_stats->tx_packets;
tx_bytes = cpu_stats->tx_bytes;
tx_packets = cpu_stats->es.ps.tx_packets;
tx_bytes = cpu_stats->es.ps.tx_bytes;
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
stats->rx_packets += rx_packets;
......@@ -1942,19 +1962,18 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
}
static void
mvneta_update_stats(struct mvneta_port *pp, u32 pkts,
u32 len, bool tx)
mvneta_update_stats(struct mvneta_port *pp,
struct mvneta_stats *ps)
{
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
u64_stats_update_begin(&stats->syncp);
if (tx) {
stats->tx_packets += pkts;
stats->tx_bytes += len;
} else {
stats->rx_packets += pkts;
stats->rx_bytes += len;
}
stats->es.ps.rx_packets += ps->rx_packets;
stats->es.ps.rx_bytes += ps->rx_bytes;
/* xdp */
stats->es.ps.xdp_redirect += ps->xdp_redirect;
stats->es.ps.xdp_pass += ps->xdp_pass;
stats->es.ps.xdp_drop += ps->xdp_drop;
u64_stats_update_end(&stats->syncp);
}
......@@ -1969,9 +1988,15 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
rx_desc = rxq->descs + curr_desc;
if (!(rx_desc->buf_phys_addr)) {
if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
struct mvneta_pcpu_stats *stats;
pr_err("Can't refill queue %d. Done %d from %d\n",
rxq->id, i, rxq->refill_num);
rxq->refill_err++;
stats = this_cpu_ptr(pp->stats);
u64_stats_update_begin(&stats->syncp);
stats->es.refill_error++;
u64_stats_update_end(&stats->syncp);
break;
}
}
......@@ -1987,6 +2012,7 @@ static int
mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
struct xdp_frame *xdpf, bool dma_map)
{
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct mvneta_tx_desc *tx_desc;
struct mvneta_tx_buf *buf;
dma_addr_t dma_addr;
......@@ -2021,7 +2047,12 @@ mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
tx_desc->buf_phys_addr = dma_addr;
tx_desc->data_size = xdpf->len;
mvneta_update_stats(pp, 1, xdpf->len, true);
u64_stats_update_begin(&stats->syncp);
stats->es.ps.tx_bytes += xdpf->len;
stats->es.ps.tx_packets++;
stats->es.ps.xdp_tx++;
u64_stats_update_end(&stats->syncp);
mvneta_txq_inc_put(txq);
txq->pending++;
txq->count++;
......@@ -2090,7 +2121,8 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
static int
mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp)
struct bpf_prog *prog, struct xdp_buff *xdp,
struct mvneta_stats *stats)
{
unsigned int len;
u32 ret, act;
......@@ -2100,8 +2132,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
switch (act) {
case XDP_PASS:
ret = MVNETA_XDP_PASS;
break;
stats->xdp_pass++;
return MVNETA_XDP_PASS;
case XDP_REDIRECT: {
int err;
......@@ -2113,6 +2145,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
len, true);
} else {
ret = MVNETA_XDP_REDIR;
stats->xdp_redirect++;
}
break;
}
......@@ -2134,9 +2167,13 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
virt_to_head_page(xdp->data),
len, true);
ret = MVNETA_XDP_DROPPED;
stats->xdp_drop++;
break;
}
stats->rx_bytes += xdp->data_end - xdp->data;
stats->rx_packets++;
return ret;
}
......@@ -2146,12 +2183,14 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog,
struct page *page, u32 *xdp_ret)
struct page *page,
struct mvneta_stats *stats)
{
unsigned char *data = page_address(page);
int data_len = -MVNETA_MH_SIZE, len;
struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir;
int ret = 0;
if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
len = MVNETA_MAX_RX_BUF_SIZE;
......@@ -2175,17 +2214,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
xdp_set_data_meta_invalid(xdp);
if (xdp_prog) {
u32 ret;
ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp);
if (ret != MVNETA_XDP_PASS) {
mvneta_update_stats(pp, 1,
xdp->data_end - xdp->data,
false);
rx_desc->buf_phys_addr = 0;
*xdp_ret |= ret;
return ret;
}
ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp, stats);
if (ret)
goto out;
}
rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
......@@ -2193,9 +2224,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id);
rxq->skb_alloc_err++;
u64_stats_update_begin(&stats->syncp);
stats->es.skb_alloc_error++;
stats->rx_dropped++;
u64_stats_update_end(&stats->syncp);
......@@ -2209,9 +2240,11 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
mvneta_rx_csum(pp, rx_desc->status, rxq->skb);
rxq->left_size = rx_desc->data_size - len;
out:
rx_desc->buf_phys_addr = 0;
return 0;
return ret;
}
static void
......@@ -2252,12 +2285,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
struct mvneta_port *pp, int budget,
struct mvneta_rx_queue *rxq)
{
int rcvd_pkts = 0, rcvd_bytes = 0, rx_proc = 0;
int rx_proc = 0, rx_todo, refill;
struct net_device *dev = pp->dev;
struct mvneta_stats ps = {};
struct bpf_prog *xdp_prog;
struct xdp_buff xdp_buf;
int rx_todo, refill;
u32 xdp_ret = 0;
/* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
......@@ -2290,7 +2322,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
}
err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
xdp_prog, page, &xdp_ret);
xdp_prog, page, &ps);
if (err)
continue;
} else {
......@@ -2314,8 +2346,9 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
rxq->skb = NULL;
continue;
}
rcvd_pkts++;
rcvd_bytes += rxq->skb->len;
ps.rx_bytes += rxq->skb->len;
ps.rx_packets++;
/* Linux processing */
rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
......@@ -2327,11 +2360,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
}
rcu_read_unlock();
if (xdp_ret & MVNETA_XDP_REDIR)
if (ps.xdp_redirect)
xdp_do_flush_map();
if (rcvd_pkts)
mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
if (ps.rx_packets)
mvneta_update_stats(pp, &ps);
/* return some buffers to hardware queue, one at a time is too slow */
refill = mvneta_rx_refill_queue(pp, rxq);
......@@ -2339,7 +2372,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
/* Update rxq management counters */
mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
return rcvd_pkts;
return ps.rx_packets;
}
/* Main rx processing when using hardware buffer management */
......@@ -2423,8 +2456,15 @@ static int mvneta_rx_hwbm(struct napi_struct *napi,
/* Refill processing */
err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
if (err) {
struct mvneta_pcpu_stats *stats;
netdev_err(dev, "Linux processing - Can't refill\n");
rxq->refill_err++;
stats = this_cpu_ptr(pp->stats);
u64_stats_update_begin(&stats->syncp);
stats->es.refill_error++;
u64_stats_update_end(&stats->syncp);
goto err_drop_frame_ret_pool;
}
......@@ -2454,8 +2494,14 @@ static int mvneta_rx_hwbm(struct napi_struct *napi,
napi_gro_receive(napi, skb);
}
if (rcvd_pkts)
mvneta_update_stats(pp, rcvd_pkts, rcvd_bytes, false);
if (rcvd_pkts) {
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
u64_stats_update_begin(&stats->syncp);
stats->es.ps.rx_packets += rcvd_pkts;
stats->es.ps.rx_bytes += rcvd_bytes;
u64_stats_update_end(&stats->syncp);
}
/* Update rxq management counters */
mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
......@@ -2711,6 +2757,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
out:
if (frags > 0) {
struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
netdev_tx_sent_queue(nq, len);
......@@ -2724,7 +2771,10 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
else
txq->pending += frags;
mvneta_update_stats(pp, 1, len, true);
u64_stats_update_begin(&stats->syncp);
stats->es.ps.tx_bytes += len;
stats->es.ps.tx_packets++;
u64_stats_update_end(&stats->syncp);
} else {
dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
......@@ -4420,45 +4470,94 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
}
}
/* Sum the software (ethtool) counters of every possible CPU into @es.
 *
 * Each per-cpu copy is sampled under the u64_stats fetch_begin/retry
 * loop so no 64-bit counter is read while the owning CPU is mid-update
 * (matters on 32-bit systems). The snapshot is taken into locals first
 * so the retry loop has no side effects on @es if it has to re-read.
 */
static void
mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
				 struct mvneta_ethtool_stats *es)
{
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *stats;
		/* per-CPU snapshot, consistent within one syncp window */
		u64 skb_alloc_error;
		u64 refill_error;
		u64 xdp_redirect;
		u64 xdp_pass;
		u64 xdp_drop;
		u64 xdp_tx;

		stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			skb_alloc_error = stats->es.skb_alloc_error;
			refill_error = stats->es.refill_error;
			xdp_redirect = stats->es.ps.xdp_redirect;
			xdp_pass = stats->es.ps.xdp_pass;
			xdp_drop = stats->es.ps.xdp_drop;
			xdp_tx = stats->es.ps.xdp_tx;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		/* accumulate outside the retry loop */
		es->skb_alloc_error += skb_alloc_error;
		es->refill_error += refill_error;
		es->ps.xdp_redirect += xdp_redirect;
		es->ps.xdp_pass += xdp_pass;
		es->ps.xdp_drop += xdp_drop;
		es->ps.xdp_tx += xdp_tx;
	}
}
static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
struct mvneta_ethtool_stats stats = {};
const struct mvneta_statistic *s;
void __iomem *base = pp->base;
u32 high, low;
u64 val;
int i;
mvneta_ethtool_update_pcpu_stats(pp, &stats);
for (i = 0, s = mvneta_statistics;
s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
s++, i++) {
val = 0;
switch (s->type) {
case T_REG_32:
val = readl_relaxed(base + s->offset);
pp->ethtool_stats[i] += val;
break;
case T_REG_64:
/* Docs say to read low 32-bit then high */
low = readl_relaxed(base + s->offset);
high = readl_relaxed(base + s->offset + 4);
val = (u64)high << 32 | low;
pp->ethtool_stats[i] += val;
break;
case T_SW:
switch (s->offset) {
case ETHTOOL_STAT_EEE_WAKEUP:
val = phylink_get_eee_err(pp->phylink);
pp->ethtool_stats[i] += val;
break;
case ETHTOOL_STAT_SKB_ALLOC_ERR:
val = pp->rxqs[0].skb_alloc_err;
pp->ethtool_stats[i] = stats.skb_alloc_error;
break;
case ETHTOOL_STAT_REFILL_ERR:
val = pp->rxqs[0].refill_err;
pp->ethtool_stats[i] = stats.refill_error;
break;
case ETHTOOL_XDP_REDIRECT:
pp->ethtool_stats[i] = stats.ps.xdp_redirect;
break;
case ETHTOOL_XDP_PASS:
pp->ethtool_stats[i] = stats.ps.xdp_pass;
break;
case ETHTOOL_XDP_DROP:
pp->ethtool_stats[i] = stats.ps.xdp_drop;
break;
case ETHTOOL_XDP_TX:
pp->ethtool_stats[i] = stats.ps.xdp_tx;
break;
}
break;
}
pp->ethtool_stats[i] += val;
}
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment