Commit 561aa15a authored by Yuval Avnery's avatar Yuval Avnery Committed by Saeed Mahameed

net/mlx5: Separate IRQ data from EQ table data

The IRQ table should exist only for the mlx5_core_dev of a PF or VF (real PCI devices).
The EQ table of mediated devices should instead hold a pointer to the IRQ table
of the parent PCI device.
Signed-off-by: default avatarYuval Avnery <yuvalav@mellanox.com>
Reviewed-by: default avatarParav Pandit <parav@mellanox.com>
Signed-off-by: default avatarSaeed Mahameed <saeedm@mellanox.com>
parent 24163189
...@@ -77,6 +77,14 @@ struct mlx5_irq_info { ...@@ -77,6 +77,14 @@ struct mlx5_irq_info {
char name[MLX5_MAX_IRQ_NAME]; char name[MLX5_MAX_IRQ_NAME];
}; };
/* Per-PCI-device IRQ state, separated from the EQ table so that PF/VF
 * devices own it while mediated devices can reference the parent's table.
 */
struct mlx5_irq_table {
struct mlx5_irq_info *irq_info; /* array of nvec per-vector entries */
int nvec;                       /* total IRQ vectors allocated */
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *rmap;          /* reverse CPU map for accelerated RFS */
#endif
};
struct mlx5_eq_table { struct mlx5_eq_table {
struct list_head comp_eqs_list; struct list_head comp_eqs_list;
struct mlx5_eq_async pages_eq; struct mlx5_eq_async pages_eq;
...@@ -89,11 +97,8 @@ struct mlx5_eq_table { ...@@ -89,11 +97,8 @@ struct mlx5_eq_table {
struct mlx5_nb cq_err_nb; struct mlx5_nb cq_err_nb;
struct mutex lock; /* sync async eqs creations */ struct mutex lock; /* sync async eqs creations */
int num_comp_vectors; int num_comp_eqs;
struct mlx5_irq_info *irq_info; struct mlx5_irq_table *irq_table;
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *rmap;
#endif
}; };
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \ #define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
...@@ -109,11 +114,33 @@ struct mlx5_eq_table { ...@@ -109,11 +114,33 @@ struct mlx5_eq_table {
(1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \ (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
(1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)) (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
/* Allocate the per-device IRQ table and attach it to dev->priv.
 * Returns 0 on success or -ENOMEM if the allocation fails.
 */
int mlx5_irq_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *table = kvzalloc(sizeof(*table), GFP_KERNEL);

if (!table)
return -ENOMEM;

dev->priv.irq_table = table;
return 0;
}
/* Free the IRQ table allocated by mlx5_irq_table_init(). */
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *table = dev->priv.irq_table;

kvfree(table);
}
/* Number of completion vectors: everything past the reserved
 * async/control vectors (MLX5_EQ_VEC_COMP_BASE).
 */
static int mlx5_irq_get_num_comp(struct mlx5_irq_table *table)
{
int ncomp = table->nvec - MLX5_EQ_VEC_COMP_BASE;

return ncomp;
}
static struct mlx5_irq_info *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx) static struct mlx5_irq_info *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx)
{ {
struct mlx5_eq_table *eq_table = dev->priv.eq_table; struct mlx5_irq_table *irq_table = dev->priv.irq_table;
return &eq_table->irq_info[vecidx]; return &irq_table->irq_info[vecidx];
} }
static int mlx5_irq_attach_nb(struct mlx5_irq_info *irq, static int mlx5_irq_attach_nb(struct mlx5_irq_info *irq,
...@@ -158,15 +185,12 @@ static void irq_set_name(char *name, int vecidx) ...@@ -158,15 +185,12 @@ static void irq_set_name(char *name, int vecidx)
static int request_irqs(struct mlx5_core_dev *dev, int nvec) static int request_irqs(struct mlx5_core_dev *dev, int nvec)
{ {
struct mlx5_priv *priv = &dev->priv;
struct mlx5_eq_table *eq_table;
char name[MLX5_MAX_IRQ_NAME]; char name[MLX5_MAX_IRQ_NAME];
int err; int err;
int i; int i;
eq_table = priv->eq_table;
for (i = 0; i < nvec; i++) { for (i = 0; i < nvec; i++) {
struct mlx5_irq_info *irq_info = &eq_table->irq_info[i]; struct mlx5_irq_info *irq_info = mlx5_irq_get(dev, i);
int irqn = pci_irq_vector(dev->pdev, i); int irqn = pci_irq_vector(dev->pdev, i);
irq_set_name(name, i); irq_set_name(name, i);
...@@ -184,7 +208,7 @@ static int request_irqs(struct mlx5_core_dev *dev, int nvec) ...@@ -184,7 +208,7 @@ static int request_irqs(struct mlx5_core_dev *dev, int nvec)
err_request_irq: err_request_irq:
for (; i >= 0; i--) { for (; i >= 0; i--) {
struct mlx5_irq_info *irq_info = &eq_table->irq_info[i]; struct mlx5_irq_info *irq_info = mlx5_irq_get(dev, i);
int irqn = pci_irq_vector(dev->pdev, i); int irqn = pci_irq_vector(dev->pdev, i);
free_irq(irqn, &irq_info->nh); free_irq(irqn, &irq_info->nh);
...@@ -501,6 +525,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev) ...@@ -501,6 +525,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++) for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]); ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);
eq_table->irq_table = dev->priv.irq_table;
return 0; return 0;
kvfree_eq_table: kvfree_eq_table:
...@@ -796,10 +821,13 @@ EXPORT_SYMBOL(mlx5_eq_update_ci); ...@@ -796,10 +821,13 @@ EXPORT_SYMBOL(mlx5_eq_update_ci);
static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i) static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{ {
struct mlx5_priv *priv = &mdev->priv;
int vecidx = MLX5_EQ_VEC_COMP_BASE + i; int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
int irq = pci_irq_vector(mdev->pdev, vecidx); struct mlx5_priv *priv = &mdev->priv;
struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx]; struct mlx5_irq_info *irq_info;
int irq;
irq_info = mlx5_irq_get(mdev, vecidx);
irq = pci_irq_vector(mdev->pdev, vecidx);
if (!zalloc_cpumask_var(&irq_info->mask, GFP_KERNEL)) { if (!zalloc_cpumask_var(&irq_info->mask, GFP_KERNEL)) {
mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
...@@ -819,20 +847,22 @@ static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i) ...@@ -819,20 +847,22 @@ static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i) static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{ {
int vecidx = MLX5_EQ_VEC_COMP_BASE + i; int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
struct mlx5_priv *priv = &mdev->priv; struct mlx5_irq_info *irq_info;
int irq = pci_irq_vector(mdev->pdev, vecidx); int irq;
struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];
irq_info = mlx5_irq_get(mdev, vecidx);
irq = pci_irq_vector(mdev->pdev, vecidx);
irq_set_affinity_hint(irq, NULL); irq_set_affinity_hint(irq, NULL);
free_cpumask_var(irq_info->mask); free_cpumask_var(irq_info->mask);
} }
static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev) static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
{ {
int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
int err; int err;
int i; int i;
for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++) { for (i = 0; i < nvec; i++) {
err = set_comp_irq_affinity_hint(mdev, i); err = set_comp_irq_affinity_hint(mdev, i);
if (err) if (err)
goto err_out; goto err_out;
...@@ -849,9 +879,10 @@ static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev) ...@@ -849,9 +879,10 @@ static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev) static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
{ {
int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
int i; int i;
for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++) for (i = 0; i < nvec; i++)
clear_comp_irq_affinity_hint(mdev, i); clear_comp_irq_affinity_hint(mdev, i);
} }
...@@ -863,9 +894,9 @@ static void destroy_comp_eqs(struct mlx5_core_dev *dev) ...@@ -863,9 +894,9 @@ static void destroy_comp_eqs(struct mlx5_core_dev *dev)
clear_comp_irqs_affinity_hints(dev); clear_comp_irqs_affinity_hints(dev);
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
if (table->rmap) { if (table->irq_table->rmap) {
free_irq_cpu_rmap(table->rmap); free_irq_cpu_rmap(table->irq_table->rmap);
table->rmap = NULL; table->irq_table->rmap = NULL;
} }
#endif #endif
list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
...@@ -882,20 +913,20 @@ static int create_comp_eqs(struct mlx5_core_dev *dev) ...@@ -882,20 +913,20 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
{ {
struct mlx5_eq_table *table = dev->priv.eq_table; struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq; struct mlx5_eq_comp *eq;
int ncomp_vec; int ncomp_eqs;
int nent; int nent;
int err; int err;
int i; int i;
INIT_LIST_HEAD(&table->comp_eqs_list); INIT_LIST_HEAD(&table->comp_eqs_list);
ncomp_vec = table->num_comp_vectors; ncomp_eqs = table->num_comp_eqs;
nent = MLX5_COMP_EQ_SIZE; nent = MLX5_COMP_EQ_SIZE;
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
table->rmap = alloc_irq_cpu_rmap(ncomp_vec); table->irq_table->rmap = alloc_irq_cpu_rmap(ncomp_eqs);
if (!table->rmap) if (!table->irq_table->rmap)
return -ENOMEM; return -ENOMEM;
#endif #endif
for (i = 0; i < ncomp_vec; i++) { for (i = 0; i < ncomp_eqs; i++) {
int vecidx = i + MLX5_EQ_VEC_COMP_BASE; int vecidx = i + MLX5_EQ_VEC_COMP_BASE;
struct mlx5_eq_param param = {}; struct mlx5_eq_param param = {};
...@@ -912,7 +943,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev) ...@@ -912,7 +943,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
(unsigned long)&eq->tasklet_ctx); (unsigned long)&eq->tasklet_ctx);
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
irq_cpu_rmap_add(table->rmap, pci_irq_vector(dev->pdev, vecidx)); irq_cpu_rmap_add(table->irq_table->rmap,
pci_irq_vector(dev->pdev, vecidx));
#endif #endif
eq->irq_nb.notifier_call = mlx5_eq_comp_int; eq->irq_nb.notifier_call = mlx5_eq_comp_int;
param = (struct mlx5_eq_param) { param = (struct mlx5_eq_param) {
...@@ -967,22 +999,23 @@ EXPORT_SYMBOL(mlx5_vector2eqn); ...@@ -967,22 +999,23 @@ EXPORT_SYMBOL(mlx5_vector2eqn);
unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev) unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
{ {
return dev->priv.eq_table->num_comp_vectors; return dev->priv.eq_table->num_comp_eqs;
} }
EXPORT_SYMBOL(mlx5_comp_vectors_count); EXPORT_SYMBOL(mlx5_comp_vectors_count);
struct cpumask * struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector) mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{ {
/* TODO: consider irq_get_affinity_mask(irq) */ int vecidx = vector + MLX5_EQ_VEC_COMP_BASE;
return dev->priv.eq_table->irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
return dev->priv.eq_table->irq_table->irq_info[vecidx].mask;
} }
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask); EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev) struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{ {
return dev->priv.eq_table->rmap; return dev->priv.eq_table->irq_table->rmap;
} }
#endif #endif
...@@ -1008,16 +1041,17 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) ...@@ -1008,16 +1041,17 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
clear_comp_irqs_affinity_hints(dev); clear_comp_irqs_affinity_hints(dev);
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
if (table->rmap) { if (table->irq_table->rmap) {
free_irq_cpu_rmap(table->rmap); free_irq_cpu_rmap(table->irq_table->rmap);
table->rmap = NULL; table->irq_table->rmap = NULL;
} }
#endif #endif
mutex_lock(&table->lock); /* sync with create/destroy_async_eq */ mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
max_eqs = table->num_comp_vectors + MLX5_EQ_VEC_COMP_BASE; max_eqs = table->num_comp_eqs + MLX5_EQ_VEC_COMP_BASE;
for (i = max_eqs - 1; i >= 0; i--) { for (i = max_eqs - 1; i >= 0; i--) {
free_irq(pci_irq_vector(dev->pdev, i), &table->irq_info[i].nh); free_irq(pci_irq_vector(dev->pdev, i),
&mlx5_irq_get(dev, i)->nh);
} }
mutex_unlock(&table->lock); mutex_unlock(&table->lock);
pci_free_irq_vectors(dev->pdev); pci_free_irq_vectors(dev->pdev);
...@@ -1026,7 +1060,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) ...@@ -1026,7 +1060,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
static int alloc_irq_vectors(struct mlx5_core_dev *dev) static int alloc_irq_vectors(struct mlx5_core_dev *dev)
{ {
struct mlx5_priv *priv = &dev->priv; struct mlx5_priv *priv = &dev->priv;
struct mlx5_eq_table *table = priv->eq_table; struct mlx5_irq_table *table = priv->irq_table;
int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ? int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
MLX5_CAP_GEN(dev, max_num_eqs) : MLX5_CAP_GEN(dev, max_num_eqs) :
1 << MLX5_CAP_GEN(dev, log_max_eq); 1 << MLX5_CAP_GEN(dev, log_max_eq);
...@@ -1050,7 +1084,7 @@ static int alloc_irq_vectors(struct mlx5_core_dev *dev) ...@@ -1050,7 +1084,7 @@ static int alloc_irq_vectors(struct mlx5_core_dev *dev)
goto err_free_irq_info; goto err_free_irq_info;
} }
table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; table->nvec = nvec;
err = request_irqs(dev, nvec); err = request_irqs(dev, nvec);
if (err) if (err)
...@@ -1067,17 +1101,19 @@ static int alloc_irq_vectors(struct mlx5_core_dev *dev) ...@@ -1067,17 +1101,19 @@ static int alloc_irq_vectors(struct mlx5_core_dev *dev)
static void free_irq_vectors(struct mlx5_core_dev *dev) static void free_irq_vectors(struct mlx5_core_dev *dev)
{ {
struct mlx5_eq_table *table = dev->priv.eq_table; struct mlx5_irq_table *table = dev->priv.irq_table;
int i; int i;
for (i = 0; i < table->num_comp_vectors + MLX5_EQ_VEC_COMP_BASE; i++) for (i = 0; i < table->nvec; i++)
free_irq(pci_irq_vector(dev->pdev, i), &table->irq_info[i].nh); free_irq(pci_irq_vector(dev->pdev, i),
&mlx5_irq_get(dev, i)->nh);
pci_free_irq_vectors(dev->pdev); pci_free_irq_vectors(dev->pdev);
kfree(table->irq_info); kfree(table->irq_info);
} }
int mlx5_eq_table_create(struct mlx5_core_dev *dev) int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{ {
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err; int err;
err = alloc_irq_vectors(dev); err = alloc_irq_vectors(dev);
...@@ -1086,6 +1122,9 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev) ...@@ -1086,6 +1122,9 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
return err; return err;
} }
eq_table->num_comp_eqs =
mlx5_irq_get_num_comp(eq_table->irq_table);
err = create_async_eqs(dev); err = create_async_eqs(dev);
if (err) { if (err) {
mlx5_core_err(dev, "Failed to create async EQs\n"); mlx5_core_err(dev, "Failed to create async EQs\n");
......
...@@ -804,10 +804,16 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) ...@@ -804,10 +804,16 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_devcom; goto err_devcom;
} }
err = mlx5_irq_table_init(dev);
if (err) {
mlx5_core_err(dev, "failed to initialize irq table\n");
goto err_devcom;
}
err = mlx5_eq_table_init(dev); err = mlx5_eq_table_init(dev);
if (err) { if (err) {
mlx5_core_err(dev, "failed to initialize eq\n"); mlx5_core_err(dev, "failed to initialize eq\n");
goto err_devcom; goto err_irq_cleanup;
} }
err = mlx5_events_init(dev); err = mlx5_events_init(dev);
...@@ -883,6 +889,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) ...@@ -883,6 +889,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
mlx5_events_cleanup(dev); mlx5_events_cleanup(dev);
err_eq_cleanup: err_eq_cleanup:
mlx5_eq_table_cleanup(dev); mlx5_eq_table_cleanup(dev);
err_irq_cleanup:
mlx5_irq_table_cleanup(dev);
err_devcom: err_devcom:
mlx5_devcom_unregister_device(dev->priv.devcom); mlx5_devcom_unregister_device(dev->priv.devcom);
...@@ -905,6 +913,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) ...@@ -905,6 +913,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_cq_debugfs_cleanup(dev); mlx5_cq_debugfs_cleanup(dev);
mlx5_events_cleanup(dev); mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev); mlx5_eq_table_cleanup(dev);
mlx5_irq_table_cleanup(dev);
mlx5_devcom_unregister_device(dev->priv.devcom); mlx5_devcom_unregister_device(dev->priv.devcom);
} }
......
...@@ -153,6 +153,9 @@ int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam, ...@@ -153,6 +153,9 @@ int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev); void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev); void mlx5_lag_remove(struct mlx5_core_dev *dev);
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_events_init(struct mlx5_core_dev *dev); int mlx5_events_init(struct mlx5_core_dev *dev);
void mlx5_events_cleanup(struct mlx5_core_dev *dev); void mlx5_events_cleanup(struct mlx5_core_dev *dev);
void mlx5_events_start(struct mlx5_core_dev *dev); void mlx5_events_start(struct mlx5_core_dev *dev);
......
...@@ -492,6 +492,7 @@ struct mlx5_eswitch; ...@@ -492,6 +492,7 @@ struct mlx5_eswitch;
struct mlx5_lag; struct mlx5_lag;
struct mlx5_devcom; struct mlx5_devcom;
struct mlx5_eq_table; struct mlx5_eq_table;
struct mlx5_irq_table;
struct mlx5_rate_limit { struct mlx5_rate_limit {
u32 rate; u32 rate;
...@@ -521,6 +522,8 @@ struct mlx5_core_roce { ...@@ -521,6 +522,8 @@ struct mlx5_core_roce {
}; };
struct mlx5_priv { struct mlx5_priv {
/* IRQ table valid only for real pci devices PF or VF */
struct mlx5_irq_table *irq_table;
struct mlx5_eq_table *eq_table; struct mlx5_eq_table *eq_table;
/* pages stuff */ /* pages stuff */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment