Commit 17c8a0d7 authored by Saeed Mahameed, committed by Greg Kroah-Hartman

net/mlx5: EQ, Use the right place to store/read IRQ affinity hint

[ Upstream commit 1e86ace4 ]

Currently the CPU affinity hint mask for completion EQs is stored and
read from the wrong place. Because storing and reading both use the
same index, there is no functional breakage, but the irq_info entries
for completion EQs actually start at offset MLX5_EQ_VEC_COMP_BASE in
the irq_info array. This patch changes the code to use the correct
offset when storing and reading the IRQ affinity hint.
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 4f558b83
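
For context, the following is a minimal standalone sketch (not the driver code) of the indexing scheme this patch corrects: completion-EQ entries in the shared irq_info[] array begin at offset MLX5_EQ_VEC_COMP_BASE, so both the writer and the reader of the affinity hint must apply that offset. The struct name, array size, and the example value of MLX5_EQ_VEC_COMP_BASE below are illustrative assumptions, not taken from the driver.

#include <stdio.h>

/* Illustrative value only; the real constant comes from the mlx5 EQ vector enum. */
#define MLX5_EQ_VEC_COMP_BASE 4

/* Stand-in for the per-vector info the driver keeps (the real struct holds a cpumask). */
struct fake_irq_info {
	int hint_cpu;
};

/*
 * One shared table: control vectors occupy slots [0, MLX5_EQ_VEC_COMP_BASE),
 * completion vectors follow at [MLX5_EQ_VEC_COMP_BASE, ...).
 */
static struct fake_irq_info irq_info[16];

/* Store the affinity hint of completion vector i at its true slot. */
static void store_comp_hint(int i, int cpu)
{
	irq_info[MLX5_EQ_VEC_COMP_BASE + i].hint_cpu = cpu;
}

/*
 * Read it back with the same offset; applying the offset on only one
 * side is the kind of mismatch the patch removes.
 */
static int read_comp_hint(int i)
{
	return irq_info[MLX5_EQ_VEC_COMP_BASE + i].hint_cpu;
}

int main(void)
{
	store_comp_hint(0, 2);
	printf("completion vector 0 -> cpu %d\n", read_comp_hint(0));
	return 0;
}
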
@@ -1605,7 +1605,7 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
 
 static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
 {
-	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+	return cpumask_first(priv->mdev->priv.irq_info[ix + MLX5_EQ_VEC_COMP_BASE].mask);
 }
 
 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
...
@@ -619,18 +619,19 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
 static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
 {
 	struct mlx5_priv *priv = &mdev->priv;
-	int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
+	int irq = pci_irq_vector(mdev->pdev, vecidx);
 
-	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+	if (!zalloc_cpumask_var(&priv->irq_info[vecidx].mask, GFP_KERNEL)) {
 		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
 		return -ENOMEM;
 	}
 
 	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
-			priv->irq_info[i].mask);
+			priv->irq_info[vecidx].mask);
 
 	if (IS_ENABLED(CONFIG_SMP) &&
-	    irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+	    irq_set_affinity_hint(irq, priv->irq_info[vecidx].mask))
 		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
 
 	return 0;
@@ -638,11 +639,12 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
 
 static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
 {
+	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
 	struct mlx5_priv *priv = &mdev->priv;
-	int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+	int irq = pci_irq_vector(mdev->pdev, vecidx);
 
 	irq_set_affinity_hint(irq, NULL);
-	free_cpumask_var(priv->irq_info[i].mask);
+	free_cpumask_var(priv->irq_info[vecidx].mask);
 }
 
 static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
...
@@ -1195,7 +1195,7 @@ enum {
 static inline const struct cpumask *
 mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
 {
-	return dev->priv.irq_info[vector].mask;
+	return dev->priv.irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
 }
 
 #endif /* MLX5_DRIVER_H */