Commit a5e89a3f authored by Rahul Rameshbabu, committed by Jakub Kicinski

net/mlx5e: Dynamically allocate DIM structure for SQs/RQs

Make it possible for the DIM structure to be torn down while an SQ or RQ is
still active. Changing the CQ period mode is an example where the previous
sampling done with the DIM structure would need to be invalidated.
Co-developed-by: Nabil S. Alramli <dev@nalramli.com>
Signed-off-by: Nabil S. Alramli <dev@nalramli.com>
Co-developed-by: Joe Damato <jdamato@fastly.com>
Signed-off-by: Joe Damato <jdamato@fastly.com>
Signed-off-by: Rahul Rameshbabu <rrameshbabu@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://lore.kernel.org/r/20240419080445.417574-4-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent eca1e8a6
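
The change has two visible parts in the diff below: struct dim becomes a separately allocated object (kvzalloc_node() on the queue's NUMA node) instead of a field embedded in the SQ/RQ, and the DIM work handlers recover their owning queue through the dim->priv back-pointer rather than container_of(), which only works while the structure is embedded. Decoupling the lifetimes this way lets the driver free and reallocate the DIM state, discarding stale samples, without tearing down the queue itself. The following user-space sketch only illustrates that ownership pattern; the queue and moderation_state names are hypothetical, not mlx5 code:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct dim: per-queue sampling state plus a back-pointer. */
struct moderation_state {
        int profile_ix;
        void *priv;             /* owning queue, like dim->priv */
};

/* Stand-in for mlx5e_rq/mlx5e_txqsq: holds a pointer, not an embedded struct. */
struct queue {
        int id;
        struct moderation_state *mod;
};

/* Work handler: find the owner via the back-pointer, not container_of(). */
static void moderation_work(struct moderation_state *mod)
{
        struct queue *q = mod->priv;

        printf("queue %d: applying profile %d\n", q->id, mod->profile_ix);
}

static int queue_attach_moderation(struct queue *q)
{
        q->mod = calloc(1, sizeof(*q->mod));    /* kvzalloc_node() in the driver */
        if (!q->mod)
                return -1;
        q->mod->priv = q;
        return 0;
}

static void queue_detach_moderation(struct queue *q)
{
        free(q->mod);                           /* kvfree() in the driver */
        q->mod = NULL;
}

int main(void)
{
        struct queue q = { .id = 0 };

        if (queue_attach_moderation(&q))
                return 1;
        q.mod->profile_ix = 3;
        moderation_work(q.mod);

        /* Discard previous sampling state without touching the queue itself. */
        queue_detach_moderation(&q);
        if (queue_attach_moderation(&q))
                return 1;
        moderation_work(q.mod);

        queue_detach_moderation(&q);
        return 0;
}

In the driver itself the same pattern shows up as kvzalloc_node()/kvfree() pairs, with cancel_work_sync(&...dim->work) in the close paths guaranteeing the work item has finished before the free paths release the DIM structure.
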
@@ -430,7 +430,7 @@ struct mlx5e_txqsq {
         u16                        cc;
         u16                        skb_fifo_cc;
         u32                        dma_fifo_cc;
-        struct dim                 dim; /* Adaptive Moderation */
+        struct dim                *dim; /* Adaptive Moderation */
 
         /* dirtied @xmit */
         u16                        pc ____cacheline_aligned_in_smp;
@@ -722,7 +722,7 @@ struct mlx5e_rq {
         int                    ix;
         unsigned int           hw_mtu;
 
-        struct dim         dim; /* Dynamic Interrupt Moderation */
+        struct dim        *dim; /* Dynamic Interrupt Moderation */
 
         /* XDP */
         struct bpf_prog __rcu *xdp_prog;
...
@@ -44,7 +44,7 @@ mlx5e_complete_dim_work(struct dim *dim, struct dim_cq_moder moder,
 void mlx5e_rx_dim_work(struct work_struct *work)
 {
         struct dim *dim = container_of(work, struct dim, work);
-        struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+        struct mlx5e_rq *rq = dim->priv;
         struct dim_cq_moder cur_moder =
                 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
@@ -54,7 +54,7 @@ void mlx5e_rx_dim_work(struct work_struct *work)
 void mlx5e_tx_dim_work(struct work_struct *work)
 {
         struct dim *dim = container_of(work, struct dim, work);
-        struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim);
+        struct mlx5e_txqsq *sq = dim->priv;
         struct dim_cq_moder cur_moder =
                 net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
...
@@ -962,11 +962,20 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
                 }
         }
 
-        INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
-        rq->dim.mode = params->rx_cq_moderation.cq_period_mode;
+        rq->dim = kvzalloc_node(sizeof(*rq->dim), GFP_KERNEL, node);
+        if (!rq->dim) {
+                err = -ENOMEM;
+                goto err_unreg_xdp_rxq_info;
+        }
+
+        rq->dim->priv = rq;
+        INIT_WORK(&rq->dim->work, mlx5e_rx_dim_work);
+        rq->dim->mode = params->rx_cq_moderation.cq_period_mode;
 
         return 0;
 
+err_unreg_xdp_rxq_info:
+        xdp_rxq_info_unreg(&rq->xdp_rxq);
 err_destroy_page_pool:
         page_pool_destroy(rq->page_pool);
 err_free_by_rq_type:
@@ -1014,6 +1023,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
                 mlx5e_free_wqe_alloc_info(rq);
         }
 
+        kvfree(rq->dim);
         xdp_rxq_info_unreg(&rq->xdp_rxq);
         page_pool_destroy(rq->page_pool);
         mlx5_wq_destroy(&rq->wq_ctrl);
@@ -1341,7 +1351,7 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
 void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
-        cancel_work_sync(&rq->dim.work);
+        cancel_work_sync(&rq->dim->work);
         cancel_work_sync(&rq->recover_work);
         mlx5e_destroy_rq(rq);
         mlx5e_free_rx_descs(rq);
@@ -1616,12 +1626,20 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
         err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
         if (err)
                 goto err_sq_wq_destroy;
 
-        INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
-        sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
+        sq->dim = kvzalloc_node(sizeof(*sq->dim), GFP_KERNEL, cpu_to_node(c->cpu));
+        if (!sq->dim) {
+                err = -ENOMEM;
+                goto err_free_txqsq_db;
+        }
+
+        sq->dim->priv = sq;
+        INIT_WORK(&sq->dim->work, mlx5e_tx_dim_work);
+        sq->dim->mode = params->tx_cq_moderation.cq_period_mode;
 
         return 0;
 
+err_free_txqsq_db:
+        mlx5e_free_txqsq_db(sq);
 err_sq_wq_destroy:
         mlx5_wq_destroy(&sq->wq_ctrl);
@@ -1630,6 +1648,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
 {
+        kvfree(sq->dim);
         mlx5e_free_txqsq_db(sq);
         mlx5_wq_destroy(&sq->wq_ctrl);
 }
@@ -1841,7 +1860,7 @@ void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
         struct mlx5_core_dev *mdev = sq->mdev;
         struct mlx5_rate_limit rl = {0};
 
-        cancel_work_sync(&sq->dim.work);
+        cancel_work_sync(&sq->dim->work);
         cancel_work_sync(&sq->recover_work);
         mlx5e_destroy_sq(mdev, sq->sqn);
         if (sq->rate_limit) {
...
@@ -55,7 +55,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
                 return;
 
         dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
-        net_dim(&sq->dim, dim_sample);
+        net_dim(sq->dim, dim_sample);
 }
 
 static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
@@ -67,7 +67,7 @@ static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
                 return;
 
         dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
-        net_dim(&rq->dim, dim_sample);
+        net_dim(rq->dim, dim_sample);
 }
 
 void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
...
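
A side effect of the new allocation is one extra step in each allocation function's unwind chain: mlx5e_alloc_rq gains err_unreg_xdp_rxq_info and mlx5e_alloc_txqsq gains err_free_txqsq_db, and each new label releases the resource acquired just before the DIM allocation, then falls through to the existing labels. The sketch below shows that goto-based unwind idiom in isolation; it is only an illustration, and the widget and resource names are made up rather than driver code:

#include <errno.h>
#include <stdlib.h>

struct widget {
        void *db;       /* stands in for the txqsq db */
        void *dim;      /* stands in for the DIM state */
};

/* Acquire resources in order; on failure, release only what was already
 * acquired, in reverse order, via fall-through labels. */
static int widget_setup(struct widget *w)
{
        int err;

        w->db = calloc(1, 64);
        if (!w->db) {
                err = -ENOMEM;
                goto err_out;
        }

        w->dim = calloc(1, 64);
        if (!w->dim) {
                err = -ENOMEM;
                goto err_free_db;       /* undo only the previous step */
        }

        return 0;

err_free_db:
        free(w->db);
err_out:
        return err;
}

static void widget_teardown(struct widget *w)
{
        free(w->dim);   /* mirror of the setup order, reversed */
        free(w->db);
}

int main(void)
{
        struct widget w = {0};

        if (widget_setup(&w))
                return 1;
        widget_teardown(&w);
        return 0;
}
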