Commit 3b43190b authored by Shay Drory, committed by Saeed Mahameed

net/mlx5: Introduce API for request and release IRQs

Introduce new API that will allow IRQs users to hold a pointer to
mlx5_irq.
At the end of this series, IRQs will be allocated on demand. Hence,
this will allow us to properly manage and use IRQs.
Signed-off-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent c38421ab
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#include "eswitch.h" #include "eswitch.h"
#include "lib/clock.h" #include "lib/clock.h"
#include "diag/fw_tracer.h" #include "diag/fw_tracer.h"
#include "mlx5_irq.h"
enum { enum {
MLX5_EQE_OWNER_INIT_VAL = 0x1, MLX5_EQE_OWNER_INIT_VAL = 0x1,
...@@ -309,13 +310,19 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, ...@@ -309,13 +310,19 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc); mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
init_eq_buf(eq); init_eq_buf(eq);
eq->irq = mlx5_irq_request(dev, vecidx);
if (IS_ERR(eq->irq)) {
err = PTR_ERR(eq->irq);
goto err_buf;
}
inlen = MLX5_ST_SZ_BYTES(create_eq_in) + inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages; MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;
in = kvzalloc(inlen, GFP_KERNEL); in = kvzalloc(inlen, GFP_KERNEL);
if (!in) { if (!in) {
err = -ENOMEM; err = -ENOMEM;
goto err_buf; goto err_irq;
} }
pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas); pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
...@@ -359,6 +366,8 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, ...@@ -359,6 +366,8 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
err_in: err_in:
kvfree(in); kvfree(in);
err_irq:
mlx5_irq_release(eq->irq);
err_buf: err_buf:
mlx5_frag_buf_free(dev, &eq->frag_buf); mlx5_frag_buf_free(dev, &eq->frag_buf);
return err; return err;
...@@ -377,10 +386,9 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, ...@@ -377,10 +386,9 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct notifier_block *nb) struct notifier_block *nb)
{ {
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err; int err;
err = mlx5_irq_attach_nb(eq_table->irq_table, eq->vecidx, nb); err = mlx5_irq_attach_nb(eq->irq, nb);
if (!err) if (!err)
eq_update_ci(eq, 1); eq_update_ci(eq, 1);
...@@ -399,9 +407,7 @@ EXPORT_SYMBOL(mlx5_eq_enable); ...@@ -399,9 +407,7 @@ EXPORT_SYMBOL(mlx5_eq_enable);
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq, void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct notifier_block *nb) struct notifier_block *nb)
{ {
struct mlx5_eq_table *eq_table = dev->priv.eq_table; mlx5_irq_detach_nb(eq->irq, nb);
mlx5_irq_detach_nb(eq_table->irq_table, eq->vecidx, nb);
} }
EXPORT_SYMBOL(mlx5_eq_disable); EXPORT_SYMBOL(mlx5_eq_disable);
...@@ -415,10 +421,9 @@ static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) ...@@ -415,10 +421,9 @@ static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
if (err) if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn); eq->eqn);
synchronize_irq(eq->irqn); mlx5_irq_release(eq->irq);
mlx5_frag_buf_free(dev, &eq->frag_buf); mlx5_frag_buf_free(dev, &eq->frag_buf);
return err; return err;
} }
...@@ -863,7 +868,6 @@ static int create_comp_eqs(struct mlx5_core_dev *dev) ...@@ -863,7 +868,6 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
} }
return 0; return 0;
clean: clean:
destroy_comp_eqs(dev); destroy_comp_eqs(dev);
return err; return err;
......
...@@ -32,6 +32,7 @@ struct mlx5_eq { ...@@ -32,6 +32,7 @@ struct mlx5_eq {
unsigned int irqn; unsigned int irqn;
u8 eqn; u8 eqn;
struct mlx5_rsc_debug *dbg; struct mlx5_rsc_debug *dbg;
struct mlx5_irq *irq;
}; };
struct mlx5_eq_async { struct mlx5_eq_async {
......
...@@ -76,6 +76,7 @@ ...@@ -76,6 +76,7 @@
#include "sf/vhca_event.h" #include "sf/vhca_event.h"
#include "sf/dev/dev.h" #include "sf/dev/dev.h"
#include "sf/sf.h" #include "sf/sf.h"
#include "mlx5_irq.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver"); MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
......
...@@ -169,25 +169,6 @@ void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev ...@@ -169,25 +169,6 @@ void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev
void mlx5_lag_add_mdev(struct mlx5_core_dev *dev); void mlx5_lag_add_mdev(struct mlx5_core_dev *dev);
void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev); void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev);
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_irq_table_create(struct mlx5_core_dev *dev);
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
struct notifier_block *nb);
int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
struct notifier_block *nb);
int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
int msix_vec_count);
int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
struct cpumask *
mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx);
struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table);
int mlx5_irq_get_num_comp(struct mlx5_irq_table *table);
struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
int mlx5_events_init(struct mlx5_core_dev *dev); int mlx5_events_init(struct mlx5_core_dev *dev);
void mlx5_events_cleanup(struct mlx5_core_dev *dev); void mlx5_events_cleanup(struct mlx5_core_dev *dev);
void mlx5_events_start(struct mlx5_core_dev *dev); void mlx5_events_start(struct mlx5_core_dev *dev);
......
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021 Mellanox Technologies. */
#ifndef __MLX5_IRQ_H__
#define __MLX5_IRQ_H__
#include <linux/mlx5/driver.h>
/* Opaque handle to a single device IRQ; users of this header hold
 * pointers only — the struct is defined in the IRQ implementation file.
 */
struct mlx5_irq;
/* IRQ table lifecycle hooks called by the core device init/teardown paths. */
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_irq_table_create(struct mlx5_core_dev *dev);
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
/* Accessors for RFS cpu_rmap, number of completion vectors, and the
 * per-device IRQ table itself.
 */
struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table);
int mlx5_irq_get_num_comp(struct mlx5_irq_table *table);
struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
/* SR-IOV MSI-X vector count management for VFs. */
int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
			    int msix_vec_count);
int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
/* Take a reference on the IRQ mapped to @vecidx; returns an ERR_PTR on
 * failure. Pair with mlx5_irq_release() when the user is done with it.
 */
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, int vecidx);
void mlx5_irq_release(struct mlx5_irq *irq);
/* Register/unregister a notifier on the IRQ's atomic notifier chain. */
int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
struct cpumask *
mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx);
#endif /* __MLX5_IRQ_H__ */
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/mlx5/driver.h> #include <linux/mlx5/driver.h>
#include "mlx5_core.h" #include "mlx5_core.h"
#include "mlx5_irq.h"
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h> #include <linux/cpu_rmap.h>
#endif #endif
...@@ -160,13 +161,10 @@ static void irq_put(struct mlx5_irq *irq) ...@@ -160,13 +161,10 @@ static void irq_put(struct mlx5_irq *irq)
kref_put(&irq->kref, irq_release); kref_put(&irq->kref, irq_release);
} }
int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx, int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
struct notifier_block *nb)
{ {
struct mlx5_irq *irq;
int err; int err;
irq = &irq_table->irq[vecidx];
err = kref_get_unless_zero(&irq->kref); err = kref_get_unless_zero(&irq->kref);
if (WARN_ON_ONCE(!err)) if (WARN_ON_ONCE(!err))
/* Something very bad happens here, we are enabling EQ /* Something very bad happens here, we are enabling EQ
...@@ -179,16 +177,31 @@ int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx, ...@@ -179,16 +177,31 @@ int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
return err; return err;
} }
int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx, int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
struct notifier_block *nb)
{ {
struct mlx5_irq *irq;
irq = &irq_table->irq[vecidx];
irq_put(irq); irq_put(irq);
return atomic_notifier_chain_unregister(&irq->nh, nb); return atomic_notifier_chain_unregister(&irq->nh, nb);
} }
/* mlx5_irq_release - drop a reference obtained via mlx5_irq_request().
 *
 * Waits for any in-flight handler on this vector to finish before the
 * reference is dropped; irq_put() (kref_put) releases the IRQ when the
 * refcount reaches zero, so the ordering here matters.
 */
void mlx5_irq_release(struct mlx5_irq *irq)
{
	/* Must synchronize before irq_put(): the put may free the IRQ. */
	synchronize_irq(irq->irqn);
	irq_put(irq);
}
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, int vecidx)
{
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq *irq = &table->irq[vecidx];
int err;
err = kref_get_unless_zero(&irq->kref);
if (!err)
return ERR_PTR(-ENOENT);
return irq;
}
static irqreturn_t mlx5_irq_int_handler(int irq, void *nh) static irqreturn_t mlx5_irq_int_handler(int irq, void *nh)
{ {
atomic_notifier_call_chain(nh, 0, NULL); atomic_notifier_call_chain(nh, 0, NULL);
......
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
#include <linux/mlx5/driver.h> #include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h> #include <linux/mlx5/vport.h>
#include "mlx5_core.h" #include "mlx5_core.h"
#include "mlx5_irq.h"
#include "eswitch.h" #include "eswitch.h"
static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf) static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment