Commit ed084fb6 authored by Maxim Mikityanskiy, committed by Daniel Borkmann

net/mlx5e: Allow ICO SQ to be used by multiple RQs

Prepare for the creation of the XSK RQ, which will also require posting
UMRs. The same ICO SQ will be used for both RQs as well as for
triggering interrupts by posting NOPs. UMR WQEs can no longer be
reused, so the optimization introduced in commit ab966d7e ("net/mlx5e:
RX, Recycle buffer of UMR WQEs") is reverted.
Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent a069e977
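
The core of the change: each ICO SQ WQE info entry now records which RQ
posted it, so the completion handler can credit UMR completions to the
owning RQ instead of to a single hardwired one. Below is a minimal,
compilable C sketch of that attribution scheme; every name in it
(struct rq, struct sq_wqe_info, post_umr, poll_ico_cq, the OP_*
constants) is a simplified stand-in, not the driver's real type or
function.

/* All types below are simplified stand-ins, not the driver's structs. */
#include <stdint.h>
#include <stdio.h>

struct rq {                             /* stands in for struct mlx5e_rq */
	uint8_t umr_in_progress;
	uint8_t umr_completed;          /* the field this patch adds */
};

struct sq_wqe_info {                    /* stands in for mlx5e_sq_wqe_info */
	uint8_t opcode;
	union {                         /* auxiliary data per opcode */
		struct {
			struct rq *rq;  /* owner, recorded at post time */
		} umr;
	};
};

enum { OP_NOP, OP_UMR };

/* Posting side: remember which RQ each UMR WQE belongs to. */
static void post_umr(struct sq_wqe_info *wi, struct rq *rq)
{
	wi->opcode = OP_UMR;
	wi->umr.rq = rq;
	rq->umr_in_progress++;
}

/* Completion side: credit the owning RQ rather than a local counter,
 * so one ICO SQ can carry UMRs (and NOPs) for several RQs at once.
 */
static void poll_ico_cq(struct sq_wqe_info *wi, int n)
{
	for (int i = 0; i < n; i++)
		if (wi[i].opcode == OP_UMR)
			wi[i].umr.rq->umr_completed++;
}

int main(void)
{
	struct rq regular = {0}, xsk = {0};
	struct sq_wqe_info db[3] = {0};

	post_umr(&db[0], &regular);
	post_umr(&db[1], &xsk);         /* a second RQ shares the ICO SQ */
	db[2].opcode = OP_NOP;          /* NOPs are posted to trigger interrupts */

	poll_ico_cq(db, 3);
	printf("regular=%u xsk=%u\n",
	       (unsigned)regular.umr_completed, (unsigned)xsk.umr_completed);
	return 0;
}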
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -348,6 +348,13 @@ enum {
 
 struct mlx5e_sq_wqe_info {
 	u8  opcode;
+
+	/* Auxiliary data for different opcodes. */
+	union {
+		struct {
+			struct mlx5e_rq *rq;
+		} umr;
+	};
 };
 
 struct mlx5e_txqsq {
@@ -571,6 +578,7 @@ struct mlx5e_rq {
 			u8                     log_stride_sz;
 			u8                     umr_in_progress;
 			u8                     umr_last_bulk;
+			u8                     umr_completed;
 		} mpwqe;
 	};
 	struct {
@@ -798,6 +806,7 @@ void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+void mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
 bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
 void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -427,11 +427,6 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
 	mlx5_wq_ll_update_db_record(wq);
 }
 
-static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
-{
-	return mlx5_wq_cyc_get_ctr_wrap_cnt(&sq->wq, sq->pc);
-}
-
 static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
 					      struct mlx5_wq_cyc *wq,
 					      u16 pi, u16 nnops)
@@ -467,9 +462,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 	}
 
 	umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-	if (unlikely(mlx5e_icosq_wrap_cnt(sq) < 2))
-		memcpy(umr_wqe, &rq->mpwqe.umr_wqe,
-		       offsetof(struct mlx5e_umr_wqe, inline_mtts));
+	memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
 
 	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) {
 		err = mlx5e_page_alloc_mapped(rq, dma_info);
@@ -487,6 +480,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 	umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
 
 	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
+	sq->db.ico_wqe[pi].umr.rq = rq;
 	sq->pc += MLX5E_UMR_WQEBBS;
 
 	sq->doorbell_cseg = &umr_wqe->ctrl;
@@ -544,11 +538,10 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 	return !!err;
 }
 
-static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
+void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 {
 	struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
 	struct mlx5_cqe64 *cqe;
-	u8  completed_umr = 0;
 	u16 sqcc;
 	int i;
 
@@ -589,7 +582,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
 
 			if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
 				sqcc += MLX5E_UMR_WQEBBS;
-				completed_umr++;
+				wi->umr.rq->mpwqe.umr_completed++;
 			} else if (likely(wi->opcode == MLX5_OPCODE_NOP)) {
 				sqcc++;
 			} else {
@@ -605,24 +598,24 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
 	sq->cc = sqcc;
 
 	mlx5_cqwq_update_db_record(&cq->wq);
-
-	if (likely(completed_umr)) {
-		mlx5e_post_rx_mpwqe(rq, completed_umr);
-		rq->mpwqe.umr_in_progress -= completed_umr;
-	}
 }
 
 bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
 {
 	struct mlx5e_icosq *sq = &rq->channel->icosq;
 	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
+	u8  umr_completed = rq->mpwqe.umr_completed;
 	u8  missing, i;
 	u16 head;
 
 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
 		return false;
 
-	mlx5e_poll_ico_cq(&sq->cq, rq);
+	if (umr_completed) {
+		mlx5e_post_rx_mpwqe(rq, umr_completed);
+		rq->mpwqe.umr_in_progress -= umr_completed;
+		rq->mpwqe.umr_completed = 0;
+	}
 
 	missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
 
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -107,7 +107,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 		busy |= work_done == budget;
 	}
 
-	busy |= c->rq.post_wqes(rq);
+	mlx5e_poll_ico_cq(&c->icosq.cq);
+
+	busy |= rq->post_wqes(rq);
 
 	if (busy) {
 		if (likely(mlx5e_channel_no_affinity_change(c)))
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -134,11 +134,6 @@ static inline void mlx5_wq_cyc_update_db_record(struct mlx5_wq_cyc *wq)
 	*wq->db = cpu_to_be32(wq->wqe_ctr);
 }
 
-static inline u16 mlx5_wq_cyc_get_ctr_wrap_cnt(struct mlx5_wq_cyc *wq, u16 ctr)
-{
-	return ctr >> wq->fbc.log_sz;
-}
-
 static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
 {
 	return ctr & wq->fbc.sz_m1;
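
Why the CQ poll moved out of the RQ refill path: once two RQs share one
ICO SQ, polling the CQ from a single RQ's mlx5e_post_rx_mpwqes() could
drain completions that belong to the other RQ. The patch therefore
polls the ICO CQ once per channel in mlx5e_napi_poll(), banking each
completion on its owner, and lets every RQ consume only its own
umr_completed count. A compilable C sketch of that ordering follows,
again with simplified stand-ins (struct rq, struct channel,
poll_ico_cq, post_rx_mpwqes here are illustrative, not driver code).

#include <stdint.h>
#include <stdio.h>

struct rq {
	uint8_t umr_in_progress;
	uint8_t umr_completed;          /* banked by the ICO CQ poller */
};

struct channel {
	struct rq rq;                   /* an XSK RQ would sit alongside */
};

/* Stand-in for mlx5e_poll_ico_cq(): drain the CQ, crediting owner RQs.
 * Here we fake a single UMR completion for the regular RQ.
 */
static void poll_ico_cq(struct channel *c)
{
	c->rq.umr_completed++;
}

/* Stand-in for mlx5e_post_rx_mpwqes(): consume this RQ's banked credits
 * exactly once, then (elided) post new UMRs for still-missing WQEs.
 */
static int post_rx_mpwqes(struct rq *rq)
{
	uint8_t done = rq->umr_completed;

	if (done) {
		rq->umr_in_progress -= done;
		rq->umr_completed = 0;
	}
	return done != 0;
}

int main(void)
{
	struct channel c = { .rq = { .umr_in_progress = 1 } };
	int busy = 0;

	poll_ico_cq(&c);                /* first: bank credits on each RQ */
	busy |= post_rx_mpwqes(&c.rq);  /* then: each RQ refills itself */
	printf("busy=%d in_progress=%u\n",
	       busy, (unsigned)c.rq.umr_in_progress);
	return 0;
}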