Commit 4b7dfc99 authored by Tariq Toukan, committed by Saeed Mahameed

net/mlx5e: Early-return on empty completion queues

The NAPI context handles different kinds of completion queues
(RX, TX, and others), so on any given poll attempt some of them
may turn out to be empty.
Here we early-return upon an empty completion queue, as well as
upon a full RX buffer, and save unnecessary logic and memory barriers.
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 4cbb7558
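
The shape of the change, reduced below to a minimal, self-contained sketch: peek at the completion queue once, return early when it is empty, and only then enter a do/while loop that consumes entries, so the post-loop memory barrier and doorbell update are paid only when completions were actually processed. This is plain C, not mlx5 code; every name in it (struct cq, cq_peek(), cq_handle(), cq_update_doorbell()) is a hypothetical stand-in for the driver's real helpers.

#include <stdio.h>

struct cqe { int data; };

struct cq {
	struct cqe *entries;
	int head;
	int count;
};

/* Return the next pending entry, or NULL when the queue is empty. */
static struct cqe *cq_peek(struct cq *cq)
{
	return (cq->head < cq->count) ? &cq->entries[cq->head] : NULL;
}

/* Consume one entry. */
static void cq_handle(struct cq *cq, struct cqe *cqe)
{
	printf("handled cqe %d\n", cqe->data);
	cq->head++;
}

/* Stand-in for the dma_wmb() + doorbell-record update that the
 * early return lets an idle queue skip entirely. */
static void cq_update_doorbell(struct cq *cq)
{
	printf("doorbell updated at head %d\n", cq->head);
}

static int poll_cq(struct cq *cq, int budget)
{
	struct cqe *cqe = cq_peek(cq);	/* peek once, up front */
	int work_done = 0;

	if (!cqe)	/* early return: an empty queue costs almost nothing */
		return 0;

	do {
		cq_handle(cq, cqe);
	} while ((++work_done < budget) && (cqe = cq_peek(cq)));

	cq_update_doorbell(cq);	/* paid only when work was done */
	return work_done;
}

int main(void)
{
	struct cqe entries[3] = { { 1 }, { 2 }, { 3 } };
	struct cq cq = { entries, 0, 3 };

	printf("first poll: %d\n", poll_cq(&cq, 8));	/* handles 3 */
	printf("second poll: %d\n", poll_cq(&cq, 8));	/* 0: early return */
	return 0;
}

The do/while form assumes its precondition, a non-NULL entry fetched before the loop, which is exactly what the early return establishes; the empty-queue case never reaches the barrier and doorbell logic at all.
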
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -459,16 +459,19 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 {
 	struct mlx5_wq_ll *wq = &rq->wq;
+	int err;
 
 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
 		return false;
 
+	if (mlx5_wq_ll_is_full(wq))
+		return false;
+
 	if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
 		return true;
 
-	while (!mlx5_wq_ll_is_full(wq)) {
+	do {
 		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
-		int err;
 
 		err = rq->alloc_wqe(rq, wqe, wq->head);
 		if (err == -EBUSY)
@@ -479,14 +482,14 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 		}
 
 		mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
-	}
+	} while (!mlx5_wq_ll_is_full(wq));
 
 	/* ensure wqes are visible to device before updating doorbell record */
 	dma_wmb();
 
 	mlx5_wq_ll_update_db_record(wq);
 
-	return !mlx5_wq_ll_is_full(wq);
+	return !!err;
 }
 
 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
@@ -981,7 +984,8 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 {
 	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
-	struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
+	struct mlx5e_xdpsq *xdpsq;
+	struct mlx5_cqe64 *cqe;
 	int work_done = 0;
 
 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
@@ -990,12 +994,13 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 	if (cq->decmprs_left)
 		work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
 
-	for (; work_done < budget; work_done++) {
-		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_cqe(&cq->wq);
+	cqe = mlx5_cqwq_get_cqe(&cq->wq);
+	if (!cqe)
+		return 0;
 
-		if (!cqe)
-			break;
+	xdpsq = &rq->xdpsq;
 
+	do {
 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
 			work_done +=
 				mlx5e_decompress_cqes_start(rq, cq,
@@ -1006,7 +1011,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 
 		mlx5_cqwq_pop(&cq->wq);
 		rq->handle_rx_cqe(rq, cqe);
-	}
+	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
 	if (xdpsq->db.doorbell) {
 		mlx5e_xmit_xdp_doorbell(xdpsq);
@@ -1024,6 +1029,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 {
 	struct mlx5e_xdpsq *sq;
+	struct mlx5_cqe64 *cqe;
 	struct mlx5e_rq *rq;
 	u16 sqcc;
 	int i;
@@ -1033,6 +1039,10 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
 		return false;
 
+	cqe = mlx5_cqwq_get_cqe(&cq->wq);
+	if (!cqe)
+		return false;
+
 	rq = container_of(sq, struct mlx5e_rq, xdpsq);
 
 	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
@@ -1040,15 +1050,11 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 	 */
 	sqcc = sq->cc;
 
-	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
-		struct mlx5_cqe64 *cqe;
+	i = 0;
+	do {
 		u16 wqe_counter;
 		bool last_wqe;
 
-		cqe = mlx5_cqwq_get_cqe(&cq->wq);
-		if (!cqe)
-			break;
-
 		mlx5_cqwq_pop(&cq->wq);
 
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
@@ -1066,7 +1072,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 			/* Recycle RX page */
 			mlx5e_page_release(rq, di, true);
 		} while (!last_wqe);
-	}
+	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
 	mlx5_cqwq_update_db_record(&cq->wq);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -394,6 +394,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
 	struct mlx5e_txqsq *sq;
+	struct mlx5_cqe64 *cqe;
 	u32 dma_fifo_cc;
 	u32 nbytes;
 	u16 npkts;
@@ -405,6 +406,10 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
 		return false;
 
+	cqe = mlx5_cqwq_get_cqe(&cq->wq);
+	if (!cqe)
+		return false;
+
 	npkts = 0;
 	nbytes = 0;
 
@@ -416,15 +421,11 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 	/* avoid dirtying sq cache line every cqe */
 	dma_fifo_cc = sq->dma_fifo_cc;
 
-	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
-		struct mlx5_cqe64 *cqe;
+	i = 0;
+	do {
 		u16 wqe_counter;
 		bool last_wqe;
 
-		cqe = mlx5_cqwq_get_cqe(&cq->wq);
-		if (!cqe)
-			break;
-
 		mlx5_cqwq_pop(&cq->wq);
 
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
@@ -467,7 +468,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 			sqcc += wi->num_wqebbs;
 			napi_consume_skb(skb, napi_budget);
 		} while (!last_wqe);
-	}
+
+	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
 	mlx5_cqwq_update_db_record(&cq->wq);