Commit 081cc2d7 authored by Yuval Avnery, committed by Saeed Mahameed

net/mlx5: Introduce EQ polling budget

Multiple EQs may share the same irq in subsequent patches.
To avoid starvation, a budget is set per EQ's interrupt handler.

Because of this change, it is no longer required to check that
MLX5_NUM_SPARE_EQE eqes were polled (to detect that arm is required).
It is guaranteed that MLX5_NUM_SPARE_EQE > budget, therefore the
handler will arm the EQ and exit before all the entries in the
EQ are polled.

In the scenario where the handler is out of budget and there are more
EQEs to poll, arming the EQ guarantees that the HW will send another
interrupt and the handler will be called again.
Signed-off-by: Yuval Avnery <yuvalav@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 86eec50b
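
Before the diff itself, here is a minimal standalone sketch of the pattern this commit introduces: budget-limited do/while polling with the EQ always re-armed on exit. It is not mlx5 code; fake_eq, fetch_entry(), arm_eq() and POLLING_BUDGET are hypothetical userspace stand-ins for the EQ object, next_eqe_sw(), eq_update_ci(eq, 1) and MLX5_EQ_POLLING_BUDGET.

#include <stdio.h>

#define POLLING_BUDGET 4	/* stands in for MLX5_EQ_POLLING_BUDGET (128) */

struct fake_eq {
	int pending;		/* entries currently sitting in the queue */
	int cons_index;		/* consumer index, as in the real EQ */
};

/* Hypothetical stand-in for next_eqe_sw(): returns 1 and consumes an entry
 * if one is available, 0 otherwise.
 */
static int fetch_entry(struct fake_eq *eq)
{
	if (!eq->pending)
		return 0;
	eq->pending--;
	return 1;
}

/* Hypothetical stand-in for eq_update_ci(eq, 1): publish the consumer index
 * and re-arm, so the HW raises another interrupt if entries remain or arrive.
 */
static void arm_eq(struct fake_eq *eq)
{
	printf("armed at cons_index %d, %d entries left\n",
	       eq->cons_index, eq->pending);
}

/* One "interrupt handler" invocation: poll at most POLLING_BUDGET entries. */
static void handler(struct fake_eq *eq)
{
	int num_eqes = 0;

	if (!fetch_entry(eq))
		goto out;

	do {
		/* ...process one entry here (CQ completion / async event)... */
		++eq->cons_index;
	} while ((++num_eqes < POLLING_BUDGET) && fetch_entry(eq));
out:
	arm_eq(eq);
}

int main(void)
{
	struct fake_eq eq = { .pending = 10, .cons_index = 0 };

	/* Ten entries with a budget of four: the first two calls run out of
	 * budget, arm, and rely on the next interrupt; the third drains the
	 * queue and still arms on the way out.
	 */
	handler(&eq);
	handler(&eq);
	handler(&eq);
	return 0;
}

With 10 pending entries and a budget of 4, the handler exits twice with work left over and arms each time, mirroring the guarantee described above: arming the EQ makes the HW send another interrupt instead of the handler looping indefinitely.
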
@@ -61,6 +61,16 @@ enum {
 	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
 };
 
+/* budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee that we update
+ * the ci before we polled all the entries in the EQ. MLX5_NUM_SPARE_EQE is
+ * used to set the EQ size, budget must be smaller than the EQ size.
+ */
+enum {
+	MLX5_EQ_POLLING_BUDGET = 128,
+};
+
+static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);
+
 struct mlx5_irq_info {
 	cpumask_var_t mask;
 	char name[MLX5_MAX_IRQ_NAME];
@@ -129,11 +139,16 @@ static irqreturn_t mlx5_eq_comp_int(int irq, void *eq_ptr)
 	struct mlx5_eq_comp *eq_comp = eq_ptr;
 	struct mlx5_eq *eq = eq_ptr;
 	struct mlx5_eqe *eqe;
-	int set_ci = 0;
+	int num_eqes = 0;
 	u32 cqn = -1;
 
-	while ((eqe = next_eqe_sw(eq))) {
+	eqe = next_eqe_sw(eq);
+	if (!eqe)
+		goto out;
+
+	do {
 		struct mlx5_core_cq *cq;
+
 		/* Make sure we read EQ entry contents after we've
 		 * checked the ownership bit.
 		 */
@@ -151,20 +166,10 @@ static irqreturn_t mlx5_eq_comp_int(int irq, void *eq_ptr)
 		}
 
 		++eq->cons_index;
-		++set_ci;
 
-		/* The HCA will think the queue has overflowed if we
-		 * don't tell it we've been processing events. We
-		 * create our EQs with MLX5_NUM_SPARE_EQE extra
-		 * entries, so we must update our consumer index at
-		 * least that often.
-		 */
-		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
-			eq_update_ci(eq, 0);
-			set_ci = 0;
-		}
-	}
+	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
 
+out:
 	eq_update_ci(eq, 1);
 
 	if (cqn != -1)
@@ -197,12 +202,16 @@ static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr)
 	struct mlx5_eq_table *eqt;
 	struct mlx5_core_dev *dev;
 	struct mlx5_eqe *eqe;
-	int set_ci = 0;
+	int num_eqes = 0;
 
 	dev = eq->dev;
 	eqt = dev->priv.eq_table;
 
-	while ((eqe = next_eqe_sw(eq))) {
+	eqe = next_eqe_sw(eq);
+	if (!eqe)
+		goto out;
+
+	do {
 		/*
 		 * Make sure we read EQ entry contents after we've
 		 * checked the ownership bit.
@@ -217,20 +226,10 @@ static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr)
 		atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);
 
 		++eq->cons_index;
-		++set_ci;
 
-		/* The HCA will think the queue has overflowed if we
-		 * don't tell it we've been processing events. We
-		 * create our EQs with MLX5_NUM_SPARE_EQE extra
-		 * entries, so we must update our consumer index at
-		 * least that often.
-		 */
-		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
-			eq_update_ci(eq, 0);
-			set_ci = 0;
-		}
-	}
+	} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
 
+out:
 	eq_update_ci(eq, 1);
 
 	return IRQ_HANDLED;