Commit fc63dd2a authored by Shay Drory, committed by Saeed Mahameed

net/mlx5: Change IRQ storage logic from static to dynamic

Store newly created IRQs in an xarray instead of a static array, so
that only the IRQs which are actually in use are stored.
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 2d74524c
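For readers unfamiliar with the kernel xarray API the patch moves to, the sketch below shows the allocate/store/tear-down lifecycle in isolation. It is a minimal kernel-context illustration only; the demo_* names and the max_irqs parameter are hypothetical, not part of the driver.

/* Sketch only: demo_* names are hypothetical, not driver code. */
#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct demo_irq {
        u32 index;              /* ID assigned by xa_alloc() */
        struct kref kref;       /* refcount (used in a later sketch) */
        int irqn;
};

/* An xarray created with XA_FLAGS_ALLOC supports ID allocation. */
static DEFINE_XARRAY_ALLOC(demo_irqs);

static struct demo_irq *demo_irq_create(u32 max_irqs)
{
        struct demo_irq *irq;
        int err;

        irq = kzalloc(sizeof(*irq), GFP_KERNEL);
        if (!irq)
                return ERR_PTR(-ENOMEM);
        /* Find a free index in [0, max_irqs] and store the entry there. */
        err = xa_alloc(&demo_irqs, &irq->index, irq,
                       XA_LIMIT(0, max_irqs), GFP_KERNEL);
        if (err) {
                kfree(irq);
                return ERR_PTR(err);
        }
        return irq;
}

static void demo_irq_table_destroy(void)
{
        struct demo_irq *irq;
        unsigned long index;

        /* Release whatever is still stored, then tear down the xarray. */
        xa_for_each(&demo_irqs, index, irq) {
                xa_erase(&demo_irqs, index);
                kfree(irq);
        }
        xa_destroy(&demo_irqs);
}

xa_alloc() both finds a free index and stores the entry under it, which is what lets the driver drop the statically sized table->irq array below.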
@@ -892,10 +892,16 @@ EXPORT_SYMBOL(mlx5_comp_vectors_count);
 struct cpumask *
 mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
 {
-	int vecidx = vector + MLX5_IRQ_VEC_COMP_BASE;
+	struct mlx5_eq_table *table = dev->priv.eq_table;
+	struct mlx5_eq_comp *eq, *n;
+	int i = 0;
+
+	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+		if (i++ == vector)
+			break;
+	}
 
-	return mlx5_irq_get_affinity_mask(dev->priv.eq_table->irq_table,
-					  vecidx);
+	return mlx5_irq_get_affinity_mask(eq->core.irq);
 }
 EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
...
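The hunk above no longer maps the vector to a slot in the static IRQ table; it walks the completion EQ list to the vector-th entry and asks that EQ's own IRQ for its affinity mask. A minimal sketch of the same N-th-entry lookup idiom follows; the demo_eq type and the NULL fallback are illustrative additions, not driver code.

#include <linux/list.h>

struct demo_eq {
        struct list_head list;  /* linked into a per-device EQ list */
        int irqn;
};

/* Return the vector-th EQ on the list, or NULL if vector is too large. */
static struct demo_eq *demo_eq_by_vector(struct list_head *eqs, int vector)
{
        struct demo_eq *eq;
        int i = 0;

        list_for_each_entry(eq, eqs, list)
                if (i++ == vector)
                        return eq;
        return NULL;
}

The driver's version uses list_for_each_entry_safe() and relies on the caller passing an in-range vector, so it has no NULL case.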
@@ -24,7 +24,6 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, int vecidx,
 void mlx5_irq_release(struct mlx5_irq *irq);
 int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
 int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
-struct cpumask *
-mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx);
+struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
 
 #endif /* __MLX5_IRQ_H__ */
@@ -14,15 +14,17 @@
 #define MLX5_MAX_IRQ_NAME (32)
 
 struct mlx5_irq {
+	u32 index;
 	struct atomic_notifier_head nh;
 	cpumask_var_t mask;
 	char name[MLX5_MAX_IRQ_NAME];
 	struct kref kref;
 	int irqn;
+	struct mlx5_irq_table *table;
 };
 
 struct mlx5_irq_table {
-	struct mlx5_irq *irq;
+	struct xarray irqs;
 	int nvec;
 };
@@ -54,13 +56,6 @@ int mlx5_irq_get_num_comp(struct mlx5_irq_table *table)
 	return table->nvec - MLX5_IRQ_VEC_COMP_BASE;
 }
 
-static struct mlx5_irq *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx)
-{
-	struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
-
-	return &irq_table->irq[vecidx];
-}
-
 /**
  * mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
  * to be assigned to each VF.
@@ -149,7 +144,9 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
 static void irq_release(struct kref *kref)
 {
 	struct mlx5_irq *irq = container_of(kref, struct mlx5_irq, kref);
+	struct mlx5_irq_table *table = irq->table;
 
+	xa_erase(&table->irqs, irq->index);
 	/* free_irq requires that affinity and rmap will be cleared
 	 * before calling it. This is why there is asymmetry with set_rmap
 	 * which should be called after alloc_irq but before request_irq.
@@ -157,6 +154,7 @@ static void irq_release(struct kref *kref)
 	irq_set_affinity_hint(irq->irqn, NULL);
 	free_cpumask_var(irq->mask);
 	free_irq(irq->irqn, &irq->nh);
+	kfree(irq);
 }
 
 static void irq_put(struct mlx5_irq *irq)
@@ -203,13 +201,17 @@ static void irq_set_name(char *name, int vecidx)
 		 vecidx - MLX5_IRQ_VEC_COMP_BASE);
 }
 
-static int irq_request(struct mlx5_core_dev *dev, int i)
+static struct mlx5_irq *irq_request(struct mlx5_core_dev *dev, int i)
 {
+	struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
 	char name[MLX5_MAX_IRQ_NAME];
+	struct xa_limit xa_num_irqs;
 	struct mlx5_irq *irq;
 	int err;
 
-	irq = mlx5_irq_get(dev, i);
+	irq = kzalloc(sizeof(*irq), GFP_KERNEL);
+	if (!irq)
+		return ERR_PTR(-ENOMEM);
 	irq->irqn = pci_irq_vector(dev->pdev, i);
 	irq_set_name(name, i);
 	ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
@@ -226,15 +228,25 @@ static int irq_request(struct mlx5_core_dev *dev, int i)
 		err = -ENOMEM;
 		goto err_cpumask;
 	}
+	xa_num_irqs.min = 0;
+	xa_num_irqs.max = table->nvec;
+	err = xa_alloc(&table->irqs, &irq->index, irq, xa_num_irqs,
+		       GFP_KERNEL);
+	if (err) {
+		mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
+			      irq->index, err);
+		goto err_xa;
+	}
+	irq->table = table;
 	kref_init(&irq->kref);
-	return 0;
+	return irq;
 
+err_xa:
+	free_cpumask_var(irq->mask);
 err_cpumask:
 	free_irq(irq->irqn, &irq->nh);
 err_req_irq:
-	if (i != 0)
-		irq_set_affinity_notifier(irq->irqn, NULL);
-	return err;
+	kfree(irq);
+	return ERR_PTR(err);
 }
 
 /**
@@ -259,25 +271,25 @@ void mlx5_irq_release(struct mlx5_irq *irq)
 struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, int vecidx,
 				  struct cpumask *affinity)
 {
-	struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
-	struct mlx5_irq *irq = &table->irq[vecidx];
-	int ret;
+	struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
+	struct mlx5_irq *irq;
 
-	ret = kref_get_unless_zero(&irq->kref);
-	if (ret)
+	irq = xa_load(&irq_table->irqs, vecidx);
+	if (irq) {
+		kref_get(&irq->kref);
+		return irq;
+	}
+	irq = irq_request(dev, vecidx);
+	if (IS_ERR(irq))
 		return irq;
-
-	ret = irq_request(dev, vecidx);
-	if (ret)
-		return ERR_PTR(ret);
 	cpumask_copy(irq->mask, affinity);
 	irq_set_affinity_hint(irq->irqn, irq->mask);
 	return irq;
 }
 
-struct cpumask *
-mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx)
+struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
 {
-	return irq_table->irq[vecidx].mask;
+	return irq->mask;
 }
 
 int mlx5_irq_table_create(struct mlx5_core_dev *dev)
@@ -299,9 +311,7 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
 	if (nvec <= MLX5_IRQ_VEC_COMP_BASE)
 		return -ENOMEM;
 
-	table->irq = kcalloc(nvec, sizeof(*table->irq), GFP_KERNEL);
-	if (!table->irq)
-		return -ENOMEM;
+	xa_init_flags(&table->irqs, XA_FLAGS_ALLOC);
 
 	nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
 				     nvec, PCI_IRQ_MSIX);
@@ -315,19 +325,26 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
 	return 0;
 
 err_free_irq:
-	kfree(table->irq);
+	xa_destroy(&table->irqs);
 	return err;
 }
 
 void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
 {
 	struct mlx5_irq_table *table = dev->priv.irq_table;
+	struct mlx5_irq *irq;
+	unsigned long index;
 
 	if (mlx5_core_is_sf(dev))
 		return;
 
+	/* There are cases where IRQs are still in use when we reach
+	 * here. Hence, make sure all the IRQs are released.
+	 */
+	xa_for_each(&table->irqs, index, irq)
+		irq_release(&irq->kref);
 	pci_free_irq_vectors(dev->pdev);
-	kfree(table->irq);
+	xa_destroy(&table->irqs);
 }
 
 struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev)
...
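Taken together, the pci_irq.c changes tie each IRQ's lifetime to its kref and its xarray slot: mlx5_irq_request() reuses a live entry found by xa_load() and allocates only on a miss, while the final kref_put() erases the entry before freeing it. The sketch below, continuing the hypothetical demo_irq type and demo_irqs xarray from the first sketch, shows that get-or-create pattern in isolation.

static void demo_irq_release(struct kref *kref)
{
        struct demo_irq *irq = container_of(kref, struct demo_irq, kref);

        /* Drop the xarray entry before freeing, as irq_release() does. */
        xa_erase(&demo_irqs, irq->index);
        kfree(irq);
}

static struct demo_irq *demo_irq_request(u32 vecidx, u32 max_irqs)
{
        struct demo_irq *irq;

        irq = xa_load(&demo_irqs, vecidx);      /* fast path: reuse */
        if (irq) {
                kref_get(&irq->kref);
                return irq;
        }
        irq = demo_irq_create(max_irqs);        /* slow path: allocate */
        if (!IS_ERR(irq))
                kref_init(&irq->kref);
        return irq;
}

static void demo_irq_put(struct demo_irq *irq)
{
        /* The last reference triggers demo_irq_release(). */
        kref_put(&irq->kref, demo_irq_release);
}

This pairing is presumably why the patch can replace kref_get_unless_zero() with a plain kref_get(): an entry is erased from the xarray before it is freed, so xa_load() should never return an object whose refcount has already dropped to zero.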