Commit dac0d15f authored by Tariq Toukan, committed by Saeed Mahameed

net/mlx5e: Re-order fields of struct mlx5e_xdpsq

In the downstream patch that adds support for XDP_REDIRECT-out,
the XDP xmit frame function doesn't share the same run context as
the NAPI that polls the XDP-SQ completion queue.

Hence, re-order the XDP-SQ fields to avoid cacheline
false-sharing.

Take redirect_flush and doorbell out of DB, into separate
cachelines.

Add a cacheline breaker within the stats struct.
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 890388ad
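
The gist of the re-ordering: fields dirtied by the completion path (cc, redirect_flush) and fields dirtied by the xmit path (pc, doorbell) must not share a cacheline once the two paths may run concurrently on different CPUs. Below is a minimal userspace sketch of that pattern, assuming a 64-byte cacheline and using C11 alignas() in place of the kernel's ____cacheline_aligned_in_smp; the struct and field names are illustrative, not the actual mlx5e definitions.

#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's ____cacheline_aligned_in_smp; a 64-byte
 * cacheline is assumed here purely for illustration.
 */
#define SKETCH_CACHELINE 64

struct xdpsq_sketch {
	/* dirtied by the completion (CQ polling) context */
	uint16_t cc;
	bool     redirect_flush;

	/* dirtied by the xmit context; the alignment forces these fields
	 * onto a new cacheline so the two writers do not false-share.
	 */
	alignas(SKETCH_CACHELINE) uint16_t pc;
	bool doorbell;
};

int main(void)
{
	/* cc lands at offset 0, pc at offset 64: separate cachelines. */
	printf("cc at %zu, pc at %zu, struct size %zu\n",
	       offsetof(struct xdpsq_sketch, cc),
	       offsetof(struct xdpsq_sketch, pc),
	       sizeof(struct xdpsq_sketch));
	return 0;
}
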
@@ -409,22 +409,22 @@ struct mlx5e_xdp_info {
 struct mlx5e_xdpsq {
 	/* data path */
 
-	/* dirtied @rx completion */
+	/* dirtied @completion */
 	u16                        cc;
-	u16                        pc;
-	struct mlx5e_cq            cq;
+	bool                       redirect_flush;
 
-	/* write@xmit, read@completion */
-	struct {
-		struct mlx5e_xdp_info     *xdpi;
-		bool                       doorbell;
-		bool                       redirect_flush;
-	} db;
+	/* dirtied @xmit */
+	u16                        pc ____cacheline_aligned_in_smp;
+	bool                       doorbell;
+
+	struct mlx5e_cq            cq;
 
 	/* read only */
 	struct mlx5_wq_cyc         wq;
 	struct mlx5e_xdpsq_stats  *stats;
+	struct {
+		struct mlx5e_xdp_info     *xdpi;
+	} db;
 	void __iomem              *uar_map;
 	u32                        sqn;
 	struct device             *pdev;
@@ -85,7 +85,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
 		if (unlikely(err))
 			goto xdp_abort;
 		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
-		rq->xdpsq.db.redirect_flush = true;
+		rq->xdpsq.redirect_flush = true;
 		mlx5e_page_dma_unmap(rq, di);
 		rq->stats->xdp_redirect++;
 		return true;
@@ -124,10 +124,10 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
 	}
 
 	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
-		if (sq->db.doorbell) {
+		if (sq->doorbell) {
 			/* SQ is full, ring doorbell */
 			mlx5e_xmit_xdp_doorbell(sq);
-			sq->db.doorbell = false;
+			sq->doorbell = false;
 		}
 		stats->full++;
 		return false;
@@ -156,7 +156,7 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
 	sq->db.xdpi[pi] = *xdpi;
 	sq->pc++;
 
-	sq->db.doorbell = true;
+	sq->doorbell = true;
 
 	stats->xmit++;
 	return true;
@@ -1201,14 +1201,14 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 		rq->handle_rx_cqe(rq, cqe);
 	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
-	if (xdpsq->db.doorbell) {
+	if (xdpsq->doorbell) {
 		mlx5e_xmit_xdp_doorbell(xdpsq);
-		xdpsq->db.doorbell = false;
+		xdpsq->doorbell = false;
 	}
 
-	if (xdpsq->db.redirect_flush) {
+	if (xdpsq->redirect_flush) {
 		xdp_do_flush_map();
-		xdpsq->db.redirect_flush = false;
+		xdpsq->redirect_flush = false;
 	}
 
 	mlx5_cqwq_update_db_record(&cq->wq);
@@ -230,7 +230,8 @@ struct mlx5e_xdpsq_stats {
 	u64 xmit;
 	u64 full;
 	u64 err;
-	u64 cqes;
+	/* dirtied @completion */
+	u64 cqes ____cacheline_aligned_in_smp;
 };
 
 struct mlx5e_ch_stats {