Commit def09e7b authored by Khalid Manaa, committed by Saeed Mahameed

net/mlx5e: Add HW_GRO statistics

This patch adds HW_GRO counters to the RX packet statistics:
 - gro_match_packets: number of received packets with the match flag set.

 - gro_packets: number of packets received via the HW_GRO feature,
                incremented by one for every received HW_GRO cqe.

 - gro_bytes: number of bytes received via the HW_GRO feature,
              incremented by the received byte count of every
              HW_GRO cqe.

 - gro_skbs: number of HW_GRO skbs built,
             incremented by one whenever an HW_GRO skb is flushed
             (i.e. when napi_gro_receive is called with the hw_gro skb).

 - gro_large_hds: number of received packets with a large header size;
                  when such a packet needs a new SKB, the driver allocates
                  one and does not use the headers entry to build it.
Signed-off-by: Khalid Manaa <khalidm@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 92552d3a
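The accounting described above boils down to a few increments in the RX path. Below is a minimal standalone C sketch of those rules; the struct and helper names (rq_stats, account_hw_gro_cqe, account_hw_gro_flush) are hypothetical, and only the counter field names mirror the new mlx5e_rq_stats members added in the diff, so treat it as an illustration rather than the driver code.

/* Simplified sketch of the HW_GRO accounting rules described in the
 * commit message; not the actual mlx5e code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rq_stats {
        uint64_t gro_packets;
        uint64_t gro_bytes;
        uint64_t gro_skbs;
        uint64_t gro_match_packets;
        uint64_t gro_large_hds;
};

/* Called once per received HW_GRO cqe. */
static void account_hw_gro_cqe(struct rq_stats *stats, uint32_t byte_cnt,
                               bool match, bool large_header)
{
        stats->gro_packets++;                /* one per HW_GRO cqe */
        stats->gro_bytes += byte_cnt;        /* bytes carried by this cqe */
        stats->gro_match_packets += match;   /* match flag set by HW */
        if (large_header)
                stats->gro_large_hds++;      /* header too big for the headers entry */
}

/* Called when the aggregated skb is flushed to the stack (napi_gro_receive). */
static void account_hw_gro_flush(struct rq_stats *stats)
{
        stats->gro_skbs++;
}

int main(void)
{
        struct rq_stats stats = { 0 };

        /* Two cqes aggregated into one skb, then flushed. */
        account_hw_gro_cqe(&stats, 1500, true, false);
        account_hw_gro_cqe(&stats, 1500, true, false);
        account_hw_gro_flush(&stats);

        printf("gro_packets=%llu gro_bytes=%llu gro_skbs=%llu\n",
               (unsigned long long)stats.gro_packets,
               (unsigned long long)stats.gro_bytes,
               (unsigned long long)stats.gro_skbs);
        return 0;
}

With the patch applied and HW_GRO active, the per-netdev totals should appear in ethtool -S output as rx_gro_packets, rx_gro_bytes, rx_gro_skbs, rx_gro_match_packets and rx_gro_large_hds, per the sw_stats_desc entries in the diff below.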
@@ -1461,7 +1461,9 @@ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
 	struct mlx5e_rq_stats *stats = rq->stats;
 
 	stats->packets++;
+	stats->gro_packets++;
 	stats->bytes += cqe_bcnt;
+	stats->gro_bytes += cqe_bcnt;
 	if (NAPI_GRO_CB(skb)->count != 1)
 		return;
 	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
@@ -1914,6 +1916,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 	} else {
 		/* allocate SKB and copy header for large header */
+		rq->stats->gro_large_hds++;
 		skb = napi_alloc_skb(rq->cq.napi,
 				     ALIGN(head_size, sizeof(long)));
 		if (unlikely(!skb)) {
@@ -1949,7 +1952,9 @@ static void
 mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
 {
 	struct sk_buff *skb = rq->hw_gro_data->skb;
+	struct mlx5e_rq_stats *stats = rq->stats;
 
+	stats->gro_skbs++;
 	if (likely(skb_shinfo(skb)->nr_frags))
 		mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
 	if (NAPI_GRO_CB(skb)->count > 1)
@@ -1992,6 +1997,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
 	struct sk_buff **skb = &rq->hw_gro_data->skb;
 	bool flush = cqe->shampo.flush;
 	bool match = cqe->shampo.match;
+	struct mlx5e_rq_stats *stats = rq->stats;
 	struct mlx5e_rx_wqe_ll *wqe;
 	struct mlx5e_dma_info *di;
 	struct mlx5e_mpw_info *wi;
@@ -2002,18 +2008,18 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
 		trigger_report(rq, cqe);
-		rq->stats->wqe_err++;
+		stats->wqe_err++;
 		goto mpwrq_cqe_out;
 	}
 
 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
-		struct mlx5e_rq_stats *stats = rq->stats;
-
 		stats->mpwqe_filler_cqes++;
 		stats->mpwqe_filler_strides += cstrides;
 		goto mpwrq_cqe_out;
 	}
 
+	stats->gro_match_packets += match;
+
 	if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
 		match = false;
 		mlx5e_shampo_flush_skb(rq, cqe, match);
@@ -128,6 +128,11 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
@@ -313,6 +318,11 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
 	s->rx_bytes += rq_stats->bytes;
 	s->rx_lro_packets += rq_stats->lro_packets;
 	s->rx_lro_bytes += rq_stats->lro_bytes;
+	s->rx_gro_packets += rq_stats->gro_packets;
+	s->rx_gro_bytes += rq_stats->gro_bytes;
+	s->rx_gro_skbs += rq_stats->gro_skbs;
+	s->rx_gro_match_packets += rq_stats->gro_match_packets;
+	s->rx_gro_large_hds += rq_stats->gro_large_hds;
 	s->rx_ecn_mark += rq_stats->ecn_mark;
 	s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
 	s->rx_csum_none += rq_stats->csum_none;
@@ -1760,6 +1770,11 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
@@ -144,6 +144,11 @@ struct mlx5e_sw_stats {
 	u64 tx_mpwqe_pkts;
 	u64 rx_lro_packets;
 	u64 rx_lro_bytes;
+	u64 rx_gro_packets;
+	u64 rx_gro_bytes;
+	u64 rx_gro_skbs;
+	u64 rx_gro_match_packets;
+	u64 rx_gro_large_hds;
 	u64 rx_mcast_packets;
 	u64 rx_ecn_mark;
 	u64 rx_removed_vlan_packets;
@@ -322,6 +327,11 @@ struct mlx5e_rq_stats {
 	u64 csum_none;
 	u64 lro_packets;
 	u64 lro_bytes;
+	u64 gro_packets;
+	u64 gro_bytes;
+	u64 gro_skbs;
+	u64 gro_match_packets;
+	u64 gro_large_hds;
 	u64 mcast_packets;
 	u64 ecn_mark;
 	u64 removed_vlan_packets;
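For context on the MLX5E_DECLARE_STAT / MLX5E_DECLARE_RX_STAT entries above: they follow the usual "name string plus field offset" descriptor pattern, which lets one generic loop report every u64 counter through ethtool -S. The sketch below reimplements that pattern with assumed names (stat_desc, DECLARE_SW_STAT, sw_desc); it is not the mlx5e definition, just an illustration of why adding a counter only requires a struct field plus one descriptor line.

/* Illustrative sketch of the "string + offsetof" stats-descriptor pattern;
 * names are hypothetical, not the mlx5e macros.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sw_stats {
        uint64_t rx_gro_packets;
        uint64_t rx_gro_bytes;
};

struct stat_desc {
        const char *name;   /* string reported to userspace */
        size_t offset;      /* byte offset of the field in sw_stats */
};

#define DECLARE_SW_STAT(fld) { #fld, offsetof(struct sw_stats, fld) }

static const struct stat_desc sw_desc[] = {
        DECLARE_SW_STAT(rx_gro_packets),
        DECLARE_SW_STAT(rx_gro_bytes),
};

int main(void)
{
        struct sw_stats s = { .rx_gro_packets = 3, .rx_gro_bytes = 4096 };
        size_t i;

        /* Generic dump loop: walk the descriptor table, read each field
         * by its recorded offset and print "name: value".
         */
        for (i = 0; i < sizeof(sw_desc) / sizeof(sw_desc[0]); i++) {
                uint64_t val;

                memcpy(&val, (const char *)&s + sw_desc[i].offset, sizeof(val));
                printf("%s: %llu\n", sw_desc[i].name, (unsigned long long)val);
        }
        return 0;
}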