Commit ee2a35fe authored by Jakub Kicinski

Merge tag 'mlx5-updates-2023-10-10' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2023-10-10

1) Adham Faris, Increase max supported channels number to 256

2) Leon Romanovsky, Allow IPsec soft/hard limits in bytes

3) Shay Drory, Replace global mlx5_intf_lock with
   HCA devcom component lock

4) Wei Zhang, Optimize SF creation flow

During SF creation, the HCA state is changed from INVALID to
IN_USE step by step. Accordingly, FW sends a vhca event to the
driver to report each state change asynchronously. Every vhca
event is critical because all related SW/FW operations are
triggered by it.

Currently there is only a single mlx5 general event handler,
which handles not only vhca events but many other events as
well. This creates a huge bottleneck because all events are
forced to be handled serially.

Moreover, all SFs share the same table_lock, so they inevitably
impact each other when they are created in parallel.

This series solves this issue by:

1. A dedicated vhca event handler is introduced to eliminate
   the mutual impact with other mlx5 events.
2. Work queues with concurrency up to the max number of FW
   command threads are employed in the vhca event handler to
   fully utilize FW capability.
3. The SF active work logic is redesigned to completely remove
   table_lock.

With the above optimizations, SF creation time is reduced by 25%,
i.e. from 80s to 60s when creating 100 SFs.
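
As an illustration, here is a minimal sketch of the workqueue
pattern described above: a dedicated, unbound workqueue whose
max_active is sized to the FW command threads, so vhca state
events are processed concurrently instead of through the single,
serial general event handler. This is not the patch code; the
names, the thread-count constant and the event payload are
illustrative only.

	#include <linux/slab.h>
	#include <linux/workqueue.h>

	/* Illustrative bound; in the real driver this comes from a FW capability. */
	#define MLX5_VHCA_FW_CMD_THREADS 8

	struct vhca_event_work {
		struct work_struct work;
		u16 fn_id;	/* function whose HCA state changed */
	};

	static struct workqueue_struct *vhca_events_wq;

	static int vhca_events_init(void)
	{
		/* Unbound workqueue: up to MLX5_VHCA_FW_CMD_THREADS events
		 * are handled in parallel, matching FW capability.
		 */
		vhca_events_wq = alloc_workqueue("mlx5_vhca_events", WQ_UNBOUND,
						 MLX5_VHCA_FW_CMD_THREADS);
		return vhca_events_wq ? 0 : -ENOMEM;
	}

	static void vhca_event_work_handler(struct work_struct *work)
	{
		struct vhca_event_work *ew =
			container_of(work, struct vhca_event_work, work);

		/* Query the new HCA state and drive the SF state machine here;
		 * no shared table_lock is taken, so handlers don't serialize.
		 */
		kfree(ew);
	}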

Patches summary:

Patch 1 - implement a dedicated vhca event handler with work
          queues sized to the max number of FW cmd threads.
Patch 2 - remove table_lock by redesigning SF active work
          logic.

* tag 'mlx5-updates-2023-10-10' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Allow IPsec soft/hard limits in bytes
  net/mlx5e: Increase max supported channels number to 256
  net/mlx5e: Preparations for supporting larger number of channels
  net/mlx5e: Refactor mlx5e_rss_init() and mlx5e_rss_free() API's
  net/mlx5e: Refactor mlx5e_rss_set_rxfh() and mlx5e_rss_get_rxfh()
  net/mlx5e: Refactor rx_res_init() and rx_res_free() APIs
  net/mlx5e: Use PTR_ERR_OR_ZERO() to simplify code
  net/mlx5: Use PTR_ERR_OR_ZERO() to simplify code
  net/mlx5: fix config name in Kconfig parameter documentation
  net/mlx5: Remove unused declaration
  net/mlx5: Replace global mlx5_intf_lock with HCA devcom component lock
  net/mlx5: Refactor LAG peer device lookout bus logic to mlx5 devcom
  net/mlx5: Avoid false positive lockdep warning by adding lock_class_key
  net/mlx5: Redesign SF active work to remove table_lock
  net/mlx5: Parallelize vhca event handling
====================

Link: https://lore.kernel.org/r/20231014171908.290428-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents d4b14c1d 627aa139
...@@ -67,7 +67,7 @@ Enabling the driver and kconfig options ...@@ -67,7 +67,7 @@ Enabling the driver and kconfig options
| Enables :ref:`IPSec XFRM cryptography-offload acceleration <xfrm_device>`. | Enables :ref:`IPSec XFRM cryptography-offload acceleration <xfrm_device>`.
**CONFIG_MLX5_EN_MACSEC=(y/n)** **CONFIG_MLX5_MACSEC=(y/n)**
| Build support for MACsec cryptography-offload acceleration in the NIC. | Build support for MACsec cryptography-offload acceleration in the NIC.
......
@@ -38,8 +38,6 @@
 #include "devlink.h"
 #include "lag/lag.h"

-/* intf dev list mutex */
-static DEFINE_MUTEX(mlx5_intf_mutex);
 static DEFINE_IDA(mlx5_adev_ida);

 static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
@@ -337,9 +335,9 @@ static void del_adev(struct auxiliary_device *adev)

 void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev)
 {
-	mutex_lock(&mlx5_intf_mutex);
+	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
 	dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
-	mutex_unlock(&mlx5_intf_mutex);
+	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
 }

 bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev)
@@ -355,7 +353,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
 	int ret = 0, i;

 	devl_assert_locked(priv_to_devlink(dev));
-	mutex_lock(&mlx5_intf_mutex);
+	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
 	priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
 	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
 		if (!priv->adev[i]) {
@@ -400,7 +398,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
 			break;
 		}
 	}
-	mutex_unlock(&mlx5_intf_mutex);
+	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
 	return ret;
 }

@@ -413,7 +411,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
 	int i;

 	devl_assert_locked(priv_to_devlink(dev));
-	mutex_lock(&mlx5_intf_mutex);
+	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
 	for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
 		if (!priv->adev[i])
 			continue;
@@ -443,7 +441,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
 		priv->adev[i] = NULL;
 	}
 	priv->flags |= MLX5_PRIV_FLAGS_DETACH;
-	mutex_unlock(&mlx5_intf_mutex);
+	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
 }

 int mlx5_register_device(struct mlx5_core_dev *dev)
@@ -451,10 +449,10 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
 	int ret;

 	devl_assert_locked(priv_to_devlink(dev));
-	mutex_lock(&mlx5_intf_mutex);
+	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
 	dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
 	ret = mlx5_rescan_drivers_locked(dev);
-	mutex_unlock(&mlx5_intf_mutex);
+	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
 	if (ret)
 		mlx5_unregister_device(dev);

@@ -464,10 +462,10 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
 void mlx5_unregister_device(struct mlx5_core_dev *dev)
 {
 	devl_assert_locked(priv_to_devlink(dev));
-	mutex_lock(&mlx5_intf_mutex);
+	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
 	dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
 	mlx5_rescan_drivers_locked(dev);
-	mutex_unlock(&mlx5_intf_mutex);
+	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
 }

 static int add_drivers(struct mlx5_core_dev *dev)
@@ -545,7 +543,6 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
 {
 	struct mlx5_priv *priv = &dev->priv;

-	lockdep_assert_held(&mlx5_intf_mutex);
 	if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
 		return 0;

@@ -565,85 +562,3 @@ bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev

 	return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
 }
-
-static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
-{
-	return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
-		     (dev->pdev->bus->number << 8) |
-		     PCI_SLOT(dev->pdev->devfn));
-}
-
-static int _next_phys_dev(struct mlx5_core_dev *mdev,
-			  const struct mlx5_core_dev *curr)
-{
-	if (!mlx5_core_is_pf(mdev))
-		return 0;
-
-	if (mdev == curr)
-		return 0;
-
-	if (!mlx5_same_hw_devs(mdev, (struct mlx5_core_dev *)curr) &&
-	    mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
-		return 0;
-
-	return 1;
-}
-
-static void *pci_get_other_drvdata(struct device *this, struct device *other)
-{
-	if (this->driver != other->driver)
-		return NULL;
-
-	return pci_get_drvdata(to_pci_dev(other));
-}
-
-static int next_phys_dev_lag(struct device *dev, const void *data)
-{
-	struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
-
-	mdev = pci_get_other_drvdata(this->device, dev);
-	if (!mdev)
-		return 0;
-
-	if (!mlx5_lag_is_supported(mdev))
-		return 0;
-
-	return _next_phys_dev(mdev, data);
-}
-
-static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
-					       int (*match)(struct device *dev, const void *data))
-{
-	struct device *next;
-
-	if (!mlx5_core_is_pf(dev))
-		return NULL;
-
-	next = bus_find_device(&pci_bus_type, NULL, dev, match);
-	if (!next)
-		return NULL;
-
-	put_device(next);
-	return pci_get_drvdata(to_pci_dev(next));
-}
-
-/* Must be called with intf_mutex held */
-struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
-{
-	lockdep_assert_held(&mlx5_intf_mutex);
-	return mlx5_get_next_dev(dev, &next_phys_dev_lag);
-}
-
-void mlx5_dev_list_lock(void)
-{
-	mutex_lock(&mlx5_intf_mutex);
-}
-
-void mlx5_dev_list_unlock(void)
-{
-	mutex_unlock(&mlx5_intf_mutex);
-}
-
-int mlx5_dev_list_trylock(void)
-{
-	return mutex_trylock(&mlx5_intf_mutex);
-}
@@ -141,7 +141,7 @@ struct page_pool;
 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
 #define MLX5E_MIN_NUM_CHANNELS 0x1
-#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
+#define MLX5E_MAX_NUM_CHANNELS 256
 #define MLX5E_TX_CQ_POLL_BUDGET 128
 #define MLX5E_TX_XSK_POLL_BUDGET 64
 #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
@@ -200,7 +200,8 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 {
 	return is_kdump_kernel() ?
 		MLX5E_MIN_NUM_CHANNELS :
-		min_t(int, mlx5_comp_vectors_max(mdev), MLX5E_MAX_NUM_CHANNELS);
+		min3(mlx5_comp_vectors_max(mdev), (u32)MLX5E_MAX_NUM_CHANNELS,
+		     (u32)(1 << MLX5_CAP_GEN(mdev, log_max_rqt_size)));
 }

 /* The maximum WQE size can be retrieved by max_wqe_sz_sq in
...
@@ -150,7 +150,6 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
 					  struct dentry *dfs_root);
 void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
 struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs);
-void mlx5e_fs_set_tc(struct mlx5e_flow_steering *fs, struct mlx5e_tc_table *tc);
 struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs);
 struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs);
 struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress);
...
@@ -9,7 +9,7 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
 {
 	unsigned int i;

-	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+	for (i = 0; i < indir->actual_table_size; i++)
 		indir->table[i] = i % num_channels;
 }

@@ -45,9 +45,9 @@ static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
 }

 int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
-			  bool indir_enabled, u32 init_rqn)
+			  bool indir_enabled, u32 init_rqn, u32 indir_table_size)
 {
-	u16 max_size = indir_enabled ? MLX5E_INDIR_RQT_SIZE : 1;
+	u16 max_size = indir_enabled ? indir_table_size : 1;

 	return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, 1);
 }

@@ -68,11 +68,11 @@ static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns
 {
 	unsigned int i;

-	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
+	for (i = 0; i < indir->actual_table_size; i++) {
 		unsigned int ix = i;

 		if (hfunc == ETH_RSS_HASH_XOR)
-			ix = mlx5e_bits_invert(ix, ilog2(MLX5E_INDIR_RQT_SIZE));
+			ix = mlx5e_bits_invert(ix, ilog2(indir->actual_table_size));

 		ix = indir->table[ix];

@@ -94,7 +94,7 @@ int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
 	u32 *rss_rqns;
 	int err;

-	rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL);
+	rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL);
 	if (!rss_rqns)
 		return -ENOMEM;

@@ -102,13 +102,25 @@ int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
 	if (err)
 		goto out;

-	err = mlx5e_rqt_init(rqt, mdev, MLX5E_INDIR_RQT_SIZE, rss_rqns, MLX5E_INDIR_RQT_SIZE);
+	err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns,
+			     indir->actual_table_size);

 out:
 	kvfree(rss_rqns);
 	return err;
 }

+#define MLX5E_UNIFORM_SPREAD_RQT_FACTOR 2
+
+u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels)
+{
+	u32 rqt_size = max_t(u32, MLX5E_INDIR_MIN_RQT_SIZE,
+			     roundup_pow_of_two(num_channels * MLX5E_UNIFORM_SPREAD_RQT_FACTOR));
+	u32 max_cap_rqt_size = 1 << MLX5_CAP_GEN(mdev, log_max_rqt_size);
+
+	return min_t(u32, rqt_size, max_cap_rqt_size);
+}
+
 void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
 {
 	mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
@@ -151,10 +163,10 @@ int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_
 	u32 *rss_rqns;
 	int err;

-	if (WARN_ON(rqt->size != MLX5E_INDIR_RQT_SIZE))
+	if (WARN_ON(rqt->size != indir->max_table_size))
 		return -EINVAL;

-	rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL);
+	rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL);
 	if (!rss_rqns)
 		return -ENOMEM;

@@ -162,7 +174,7 @@ int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_
 	if (err)
 		goto out;

-	err = mlx5e_rqt_redirect(rqt, rss_rqns, MLX5E_INDIR_RQT_SIZE);
+	err = mlx5e_rqt_redirect(rqt, rss_rqns, indir->actual_table_size);

 out:
 	kvfree(rss_rqns);
...
@@ -6,12 +6,14 @@

 #include <linux/kernel.h>

-#define MLX5E_INDIR_RQT_SIZE (1 << 8)
+#define MLX5E_INDIR_MIN_RQT_SIZE (BIT(8))

 struct mlx5_core_dev;

 struct mlx5e_rss_params_indir {
-	u32 table[MLX5E_INDIR_RQT_SIZE];
+	u32 *table;
+	u32 actual_table_size;
+	u32 max_table_size;
 };

 void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
@@ -24,7 +26,7 @@ struct mlx5e_rqt {
 };

 int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
-			  bool indir_enabled, u32 init_rqn);
+			  bool indir_enabled, u32 init_rqn, u32 indir_table_size);
 int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
			 u32 *rqns, unsigned int num_rqns,
			 u8 hfunc, struct mlx5e_rss_params_indir *indir);
@@ -35,6 +37,7 @@ static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt)
 	return rqt->rqtn;
 }

+u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels);
 int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn);
 int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
			     u8 hfunc, struct mlx5e_rss_params_indir *indir);
...
@@ -81,14 +81,75 @@ struct mlx5e_rss {
 	refcount_t refcnt;
 };

-struct mlx5e_rss *mlx5e_rss_alloc(void)
+void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels)
 {
-	return kvzalloc(sizeof(struct mlx5e_rss), GFP_KERNEL);
+	rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels);
 }

-void mlx5e_rss_free(struct mlx5e_rss *rss)
+int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
+				u32 actual_table_size, u32 max_table_size)
 {
+	indir->table = kvmalloc_array(max_table_size, sizeof(*indir->table), GFP_KERNEL);
+	if (!indir->table)
+		return -ENOMEM;
+
+	indir->max_table_size = max_table_size;
+	indir->actual_table_size = actual_table_size;
+
+	return 0;
+}
+
+void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir)
+{
+	kvfree(indir->table);
+}
+
+static int mlx5e_rss_copy(struct mlx5e_rss *to, const struct mlx5e_rss *from)
+{
+	u32 *dst_indir_table;
+
+	if (to->indir.actual_table_size != from->indir.actual_table_size ||
+	    to->indir.max_table_size != from->indir.max_table_size) {
+		mlx5e_rss_warn(to->mdev,
+			       "Failed to copy RSS due to size mismatch, src (actual %u, max %u) != dst (actual %u, max %u)\n",
+			       from->indir.actual_table_size, from->indir.max_table_size,
+			       to->indir.actual_table_size, to->indir.max_table_size);
+		return -EINVAL;
+	}
+
+	dst_indir_table = to->indir.table;
+	*to = *from;
+	to->indir.table = dst_indir_table;
+	memcpy(to->indir.table, from->indir.table,
+	       from->indir.actual_table_size * sizeof(*from->indir.table));
+	return 0;
+}
+
+static struct mlx5e_rss *mlx5e_rss_init_copy(const struct mlx5e_rss *from)
+{
+	struct mlx5e_rss *rss;
+	int err;
+
+	rss = kvzalloc(sizeof(*rss), GFP_KERNEL);
+	if (!rss)
+		return ERR_PTR(-ENOMEM);
+
+	err = mlx5e_rss_params_indir_init(&rss->indir, from->mdev, from->indir.actual_table_size,
+					  from->indir.max_table_size);
+	if (err)
+		goto err_free_rss;
+
+	err = mlx5e_rss_copy(rss, from);
+	if (err)
+		goto err_free_indir;
+
+	return rss;
+
+err_free_indir:
+	mlx5e_rss_params_indir_cleanup(&rss->indir);
+err_free_rss:
 	kvfree(rss);
+	return ERR_PTR(err);
 }

 static void mlx5e_rss_params_init(struct mlx5e_rss *rss)
@@ -282,28 +343,43 @@ static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss)
 	return retval;
 }

-int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
-			   bool inner_ft_support, u32 drop_rqn)
+static int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss)
 {
-	rss->mdev = mdev;
-	rss->inner_ft_support = inner_ft_support;
-	rss->drop_rqn = drop_rqn;
-
 	mlx5e_rss_params_init(rss);
 	refcount_set(&rss->refcnt, 1);

-	return mlx5e_rqt_init_direct(&rss->rqt, mdev, true, drop_rqn);
+	return mlx5e_rqt_init_direct(&rss->rqt, rss->mdev, true,
+				     rss->drop_rqn, rss->indir.max_table_size);
 }

-int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
-		   bool inner_ft_support, u32 drop_rqn,
-		   const struct mlx5e_packet_merge_param *init_pkt_merge_param)
+struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
+				 const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+				 enum mlx5e_rss_init_type type, unsigned int nch,
+				 unsigned int max_nch)
 {
+	struct mlx5e_rss *rss;
 	int err;

-	err = mlx5e_rss_init_no_tirs(rss, mdev, inner_ft_support, drop_rqn);
+	rss = kvzalloc(sizeof(*rss), GFP_KERNEL);
+	if (!rss)
+		return ERR_PTR(-ENOMEM);
+
+	err = mlx5e_rss_params_indir_init(&rss->indir, mdev,
+					  mlx5e_rqt_size(mdev, nch),
+					  mlx5e_rqt_size(mdev, max_nch));
+	if (err)
+		goto err_free_rss;
+
+	rss->mdev = mdev;
+	rss->inner_ft_support = inner_ft_support;
+	rss->drop_rqn = drop_rqn;
+
+	err = mlx5e_rss_init_no_tirs(rss);
 	if (err)
-		goto err_out;
+		goto err_free_indir;
+
+	if (type == MLX5E_RSS_INIT_NO_TIRS)
+		goto out;

 	err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false);
 	if (err)
@@ -315,14 +391,18 @@ int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
 			goto err_destroy_tirs;
 	}

-	return 0;
+out:
+	return rss;

 err_destroy_tirs:
 	mlx5e_rss_destroy_tirs(rss, false);
 err_destroy_rqt:
 	mlx5e_rqt_destroy(&rss->rqt);
-err_out:
-	return err;
+err_free_indir:
+	mlx5e_rss_params_indir_cleanup(&rss->indir);
+err_free_rss:
+	kvfree(rss);
+	return ERR_PTR(err);
 }

 int mlx5e_rss_cleanup(struct mlx5e_rss *rss)
@@ -336,6 +416,8 @@ int mlx5e_rss_cleanup(struct mlx5e_rss *rss)
 	mlx5e_rss_destroy_tirs(rss, true);
 	mlx5e_rqt_destroy(&rss->rqt);

+	mlx5e_rss_params_indir_cleanup(&rss->indir);
+	kvfree(rss);
 	return 0;
 }

@@ -470,11 +552,9 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,

 int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
 {
-	unsigned int i;
-
 	if (indir)
-		for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
-			indir[i] = rss->indir.table[i];
+		memcpy(indir, rss->indir.table,
+		       rss->indir.actual_table_size * sizeof(*rss->indir.table));

 	if (key)
 		memcpy(key, rss->hash.toeplitz_hash_key,
@@ -495,11 +575,9 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
 	struct mlx5e_rss *old_rss;
 	int err = 0;

-	old_rss = mlx5e_rss_alloc();
-	if (!old_rss)
-		return -ENOMEM;
-
-	*old_rss = *rss;
+	old_rss = mlx5e_rss_init_copy(rss);
+	if (IS_ERR(old_rss))
+		return PTR_ERR(old_rss);

 	if (hfunc && *hfunc != rss->hash.hfunc) {
 		switch (*hfunc) {
@@ -523,18 +601,16 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
 	}

 	if (indir) {
-		unsigned int i;
-
 		changed_indir = true;

-		for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
-			rss->indir.table[i] = indir[i];
+		memcpy(rss->indir.table, indir,
+		       rss->indir.actual_table_size * sizeof(*rss->indir.table));
 	}

 	if (changed_indir && rss->enabled) {
 		err = mlx5e_rss_apply(rss, rqns, num_rqns);
 		if (err) {
-			*rss = *old_rss;
+			mlx5e_rss_copy(rss, old_rss);
 			goto out;
 		}
 	}

@@ -543,7 +619,9 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
 	mlx5e_rss_update_tirs(rss);

 out:
-	mlx5e_rss_free(old_rss);
+	mlx5e_rss_params_indir_cleanup(&old_rss->indir);
+	kvfree(old_rss);
+
 	return err;
 }
...
@@ -8,18 +8,24 @@
 #include "tir.h"
 #include "fs.h"

+enum mlx5e_rss_init_type {
+	MLX5E_RSS_INIT_NO_TIRS = 0,
+	MLX5E_RSS_INIT_TIRS
+};
+
 struct mlx5e_rss_params_traffic_type
 mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt);

 struct mlx5e_rss;

-struct mlx5e_rss *mlx5e_rss_alloc(void);
-void mlx5e_rss_free(struct mlx5e_rss *rss);
-int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
-		   bool inner_ft_support, u32 drop_rqn,
-		   const struct mlx5e_packet_merge_param *init_pkt_merge_param);
-int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
-			   bool inner_ft_support, u32 drop_rqn);
+int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
+				u32 actual_table_size, u32 max_table_size);
+void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir);
+void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels);
+struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
+				 const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+				 enum mlx5e_rss_init_type type, unsigned int nch,
+				 unsigned int max_nch);
 int mlx5e_rss_cleanup(struct mlx5e_rss *rss);

 void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss);
...
@@ -18,7 +18,7 @@ struct mlx5e_rx_res {
 	struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
 	bool rss_active;
-	u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];
+	u32 *rss_rqns;
 	unsigned int rss_nch;

 	struct {
@@ -34,41 +34,42 @@ struct mlx5e_rx_res {

 /* API for rx_res_rss_* */

+void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch)
+{
+	int i;
+
+	for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
+		if (res->rss[i])
+			mlx5e_rss_params_indir_modify_actual_size(res->rss[i], nch);
+	}
+}
+
 static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
				     unsigned int init_nch)
 {
 	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
 	struct mlx5e_rss *rss;
-	int err;

 	if (WARN_ON(res->rss[0]))
 		return -EINVAL;

-	rss = mlx5e_rss_alloc();
-	if (!rss)
-		return -ENOMEM;
-
-	err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn,
-			     &res->pkt_merge_param);
-	if (err)
-		goto err_rss_free;
+	rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
+			     &res->pkt_merge_param, MLX5E_RSS_INIT_TIRS, init_nch, res->max_nch);
+	if (IS_ERR(rss))
+		return PTR_ERR(rss);

 	mlx5e_rss_set_indir_uniform(rss, init_nch);

 	res->rss[0] = rss;

 	return 0;
-
-err_rss_free:
-	mlx5e_rss_free(rss);
-	return err;
 }

 int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch)
 {
 	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
 	struct mlx5e_rss *rss;
-	int err, i;
+	int i;

 	for (i = 1; i < MLX5E_MAX_NUM_RSS; i++)
 		if (!res->rss[i])
@@ -77,13 +78,11 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i
 	if (i == MLX5E_MAX_NUM_RSS)
 		return -ENOSPC;

-	rss = mlx5e_rss_alloc();
-	if (!rss)
-		return -ENOMEM;
-
-	err = mlx5e_rss_init_no_tirs(rss, res->mdev, inner_ft_support, res->drop_rqn);
-	if (err)
-		goto err_rss_free;
+	rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
+			     &res->pkt_merge_param, MLX5E_RSS_INIT_NO_TIRS, init_nch,
+			     res->max_nch);
+	if (IS_ERR(rss))
+		return PTR_ERR(rss);

 	mlx5e_rss_set_indir_uniform(rss, init_nch);
 	if (res->rss_active)
@@ -93,10 +92,6 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i
 	*rss_idx = i;

 	return 0;
-
-err_rss_free:
-	mlx5e_rss_free(rss);
-	return err;
 }

 static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
@@ -108,7 +103,6 @@ static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
 	if (err)
 		return err;

-	mlx5e_rss_free(rss);
 	res->rss[rss_idx] = NULL;

 	return 0;
@@ -284,9 +278,27 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx)

 /* End of API rx_res_rss_* */

-struct mlx5e_rx_res *mlx5e_rx_res_alloc(void)
+static void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
 {
-	return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL);
+	kvfree(res->rss_rqns);
+	kvfree(res);
+}
+
+static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch)
+{
+	struct mlx5e_rx_res *rx_res;
+
+	rx_res = kvzalloc(sizeof(*rx_res), GFP_KERNEL);
+	if (!rx_res)
+		return NULL;
+
+	rx_res->rss_rqns = kvcalloc(max_nch, sizeof(*rx_res->rss_rqns), GFP_KERNEL);
+	if (!rx_res->rss_rqns) {
+		kvfree(rx_res);
+		return NULL;
+	}
+
+	return rx_res;
 }

 static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
@@ -308,7 +320,8 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)

 	for (ix = 0; ix < res->max_nch; ix++) {
 		err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt,
-					    res->mdev, false, res->drop_rqn);
+					    res->mdev, false, res->drop_rqn,
+					    mlx5e_rqt_size(res->mdev, res->max_nch));
 		if (err) {
 			mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n",
				       err, ix);
@@ -362,7 +375,8 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
 	if (!builder)
 		return -ENOMEM;

-	err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn);
+	err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn,
+				    mlx5e_rqt_size(res->mdev, res->max_nch));
 	if (err)
 		goto out;

@@ -404,13 +418,19 @@ static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
 	mlx5e_rqt_destroy(&res->ptp.rqt);
 }

-int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
-		      enum mlx5e_rx_res_features features, unsigned int max_nch,
-		      u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
-		      unsigned int init_nch)
+struct mlx5e_rx_res *
+mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
+		    unsigned int max_nch, u32 drop_rqn,
+		    const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+		    unsigned int init_nch)
 {
+	struct mlx5e_rx_res *res;
 	int err;

+	res = mlx5e_rx_res_alloc(mdev, max_nch);
+	if (!res)
+		return ERR_PTR(-ENOMEM);
+
 	res->mdev = mdev;
 	res->features = features;
 	res->max_nch = max_nch;
@@ -421,7 +441,7 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,

 	err = mlx5e_rx_res_rss_init_def(res, init_nch);
 	if (err)
-		goto err_out;
+		goto err_rx_res_free;

 	err = mlx5e_rx_res_channels_init(res);
 	if (err)
@@ -431,14 +451,15 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
 	if (err)
 		goto err_channels_destroy;

-	return 0;
+	return res;

 err_channels_destroy:
 	mlx5e_rx_res_channels_destroy(res);
 err_rss_destroy:
 	__mlx5e_rx_res_rss_destroy(res, 0);
-err_out:
-	return err;
+err_rx_res_free:
+	mlx5e_rx_res_free(res);
+	return ERR_PTR(err);
 }

 void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
@@ -446,11 +467,7 @@ void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
 	mlx5e_rx_res_ptp_destroy(res);
 	mlx5e_rx_res_channels_destroy(res);
 	mlx5e_rx_res_rss_destroy_all(res);
-}
-
-void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
-{
-	kvfree(res);
+	mlx5e_rx_res_free(res);
 }

 u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
...
@@ -21,13 +21,12 @@ enum mlx5e_rx_res_features {
 };

 /* Setup */
-struct mlx5e_rx_res *mlx5e_rx_res_alloc(void);
-int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
-		      enum mlx5e_rx_res_features features, unsigned int max_nch,
-		      u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
-		      unsigned int init_nch);
+struct mlx5e_rx_res *
+mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
+		    unsigned int max_nch, u32 drop_rqn,
+		    const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+		    unsigned int init_nch);
 void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res);
-void mlx5e_rx_res_free(struct mlx5e_rx_res *res);

 /* TIRN getters for flow steering */
 u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix);
@@ -60,6 +59,7 @@ int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx);
 int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res);
 int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss);
 struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx);
+void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch);

 /* Workaround for hairpin */
 struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res);
...
@@ -56,7 +56,7 @@ static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
 	return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
 }

-static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work)
+static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work)
 {
 	struct mlx5e_ipsec_dwork *dwork =
		container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
@@ -486,9 +486,15 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
 		return -EINVAL;
 	}

-	if (x->lft.hard_byte_limit != XFRM_INF ||
-	    x->lft.soft_byte_limit != XFRM_INF) {
-		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support limits in bytes");
+	if (x->lft.soft_byte_limit >= x->lft.hard_byte_limit &&
+	    x->lft.hard_byte_limit != XFRM_INF) {
+		/* XFRM stack doesn't prevent such configuration :(. */
+		NL_SET_ERR_MSG_MOD(extack, "Hard byte limit must be greater than soft one");
+		return -EINVAL;
+	}
+
+	if (!x->lft.soft_byte_limit || !x->lft.hard_byte_limit) {
+		NL_SET_ERR_MSG_MOD(extack, "Soft/hard byte limits can't be 0");
 		return -EINVAL;
 	}

@@ -624,11 +630,10 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
 	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
 		return 0;

-	if (x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
-		return 0;
-
 	if (x->lft.soft_packet_limit == XFRM_INF &&
-	    x->lft.hard_packet_limit == XFRM_INF)
+	    x->lft.hard_packet_limit == XFRM_INF &&
+	    x->lft.soft_byte_limit == XFRM_INF &&
+	    x->lft.hard_byte_limit == XFRM_INF)
 		return 0;

 	dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
@@ -636,7 +641,7 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
 		return -ENOMEM;

 	dwork->sa_entry = sa_entry;
-	INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_tx_limit);
+	INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_sw_limits);
 	sa_entry->dwork = dwork;

 	return 0;
 }
...
@@ -1326,15 +1326,17 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	setup_fte_no_frags(spec);
 	setup_fte_upper_proto_match(spec, &attrs->upspec);

-	if (rx != ipsec->rx_esw)
-		err = setup_modify_header(ipsec, attrs->type,
-					  sa_entry->ipsec_obj_id | BIT(31),
-					  XFRM_DEV_OFFLOAD_IN, &flow_act);
-	else
-		err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);
+	if (!attrs->drop) {
+		if (rx != ipsec->rx_esw)
+			err = setup_modify_header(ipsec, attrs->type,
+						  sa_entry->ipsec_obj_id | BIT(31),
+						  XFRM_DEV_OFFLOAD_IN, &flow_act);
+		else
+			err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);

-	if (err)
-		goto err_mod_header;
+		if (err)
+			goto err_mod_header;
+	}

 	switch (attrs->type) {
 	case XFRM_DEV_OFFLOAD_PACKET:
@@ -1384,7 +1386,8 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 	if (flow_act.pkt_reformat)
 		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
 err_pkt_reformat:
-	mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
+	if (flow_act.modify_hdr)
+		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
 err_mod_header:
 	kvfree(spec);
 err_alloc:
@@ -1882,7 +1885,8 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 		return;
 	}

-	mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+	if (ipsec_rule->modify_hdr)
+		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
 	mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
 	rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
 }
...
@@ -54,7 +54,6 @@ struct mlx5e_accel_tx_ipsec_state {

 #ifdef CONFIG_MLX5_EN_IPSEC

-void mlx5e_ipsec_inverse_table_init(void);
 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
			    struct xfrm_offload *xo);
 void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
...
@@ -1247,7 +1247,7 @@ static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)

 u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv)
 {
-	return MLX5E_INDIR_RQT_SIZE;
+	return mlx5e_rqt_size(priv->mdev, priv->channels.params.num_channels);
 }

 static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
...
@@ -1283,9 +1283,7 @@ static int mlx5e_create_inner_ttc_table(struct mlx5e_flow_steering *fs,
 	mlx5e_set_inner_ttc_params(fs, rx_res, &ttc_params);
 	fs->inner_ttc = mlx5_create_inner_ttc_table(fs->mdev,
						    &ttc_params);
-	if (IS_ERR(fs->inner_ttc))
-		return PTR_ERR(fs->inner_ttc);
-	return 0;
+	return PTR_ERR_OR_ZERO(fs->inner_ttc);
 }

 int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
@@ -1295,9 +1293,7 @@ int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
 	mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true);
 	fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params);
-	if (IS_ERR(fs->ttc))
-		return PTR_ERR(fs->ttc);
-	return 0;
+	return PTR_ERR_OR_ZERO(fs->ttc);
 }

 int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
...
@@ -2948,8 +2948,12 @@ static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
 	mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);

 	/* This function may be called on attach, before priv->rx_res is created. */
-	if (!netif_is_rxfh_configured(priv->netdev) && priv->rx_res)
-		mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count);
+	if (priv->rx_res) {
+		mlx5e_rx_res_rss_update_num_channels(priv->rx_res, count);
+
+		if (!netif_is_rxfh_configured(priv->netdev))
+			mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count);
+	}

 	return 0;
 }
@@ -5389,10 +5393,6 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
 	enum mlx5e_rx_res_features features;
 	int err;

-	priv->rx_res = mlx5e_rx_res_alloc();
-	if (!priv->rx_res)
-		return -ENOMEM;
-
 	mlx5e_create_q_counters(priv);

 	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
@@ -5404,12 +5404,16 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
 	features = MLX5E_RX_RES_FEATURE_PTP;
 	if (mlx5_tunnel_inner_ft_supported(mdev))
 		features |= MLX5E_RX_RES_FEATURE_INNER_FT;
-	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
-				priv->max_nch, priv->drop_rq.rqn,
-				&priv->channels.params.packet_merge,
-				priv->channels.params.num_channels);
-	if (err)
+
+	priv->rx_res = mlx5e_rx_res_create(priv->mdev, features, priv->max_nch, priv->drop_rq.rqn,
+					   &priv->channels.params.packet_merge,
+					   priv->channels.params.num_channels);
+	if (IS_ERR(priv->rx_res)) {
+		err = PTR_ERR(priv->rx_res);
+		priv->rx_res = NULL;
+		mlx5_core_err(mdev, "create rx resources failed, %d\n", err);
 		goto err_close_drop_rq;
+	}

 	err = mlx5e_create_flow_steering(priv->fs, priv->rx_res, priv->profile,
					 priv->netdev);
@@ -5439,12 +5443,11 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
				    priv->profile);
 err_destroy_rx_res:
 	mlx5e_rx_res_destroy(priv->rx_res);
+	priv->rx_res = NULL;
 err_close_drop_rq:
 	mlx5e_close_drop_rq(&priv->drop_rq);
 err_destroy_q_counters:
 	mlx5e_destroy_q_counters(priv);
-	mlx5e_rx_res_free(priv->rx_res);
-	priv->rx_res = NULL;
 	return err;
 }

@@ -5455,10 +5458,9 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
 	mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
				    priv->profile);
 	mlx5e_rx_res_destroy(priv->rx_res);
+	priv->rx_res = NULL;
 	mlx5e_close_drop_rq(&priv->drop_rq);
 	mlx5e_destroy_q_counters(priv);
-	mlx5e_rx_res_free(priv->rx_res);
-	priv->rx_res = NULL;
 }

 static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv)
...
@@ -998,26 +998,22 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;

-	priv->rx_res = mlx5e_rx_res_alloc();
-	if (!priv->rx_res) {
-		err = -ENOMEM;
-		goto err_free_fs;
-	}
-
 	mlx5e_fs_init_l2_addr(priv->fs, priv->netdev);

 	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
 	if (err) {
 		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
-		goto err_rx_res_free;
+		goto err_free_fs;
 	}

-	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
-				priv->max_nch, priv->drop_rq.rqn,
-				&priv->channels.params.packet_merge,
-				priv->channels.params.num_channels);
-	if (err)
+	priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn,
+					   &priv->channels.params.packet_merge,
+					   priv->channels.params.num_channels);
+	if (IS_ERR(priv->rx_res)) {
+		err = PTR_ERR(priv->rx_res);
+		mlx5_core_err(mdev, "Create rx resources failed, err=%d\n", err);
 		goto err_close_drop_rq;
+	}

 	err = mlx5e_create_rep_ttc_table(priv);
 	if (err)
@@ -1041,11 +1037,9 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 	mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
 err_destroy_rx_res:
 	mlx5e_rx_res_destroy(priv->rx_res);
+	priv->rx_res = ERR_PTR(-EINVAL);
 err_close_drop_rq:
 	mlx5e_close_drop_rq(&priv->drop_rq);
-err_rx_res_free:
-	mlx5e_rx_res_free(priv->rx_res);
-	priv->rx_res = NULL;
 err_free_fs:
 	mlx5e_fs_cleanup(priv->fs);
 	priv->fs = NULL;
@@ -1059,9 +1053,8 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
 	mlx5e_destroy_rep_root_ft(priv);
 	mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
 	mlx5e_rx_res_destroy(priv->rx_res);
+	priv->rx_res = ERR_PTR(-EINVAL);
 	mlx5e_close_drop_rq(&priv->drop_rq);
-	mlx5e_rx_res_free(priv->rx_res);
-	priv->rx_res = NULL;
 }

 static void mlx5e_rep_mpesw_work(struct work_struct *work)
...
@@ -753,19 +753,21 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
 {
 	struct mlx5e_priv *priv = hp->func_priv;
 	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5e_rss_params_indir *indir;
+	struct mlx5e_rss_params_indir indir;
 	int err;

-	indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
-	if (!indir)
-		return -ENOMEM;
+	err = mlx5e_rss_params_indir_init(&indir, mdev,
+					  mlx5e_rqt_size(mdev, hp->num_channels),
+					  mlx5e_rqt_size(mdev, priv->max_nch));
+	if (err)
+		return err;

-	mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
+	mlx5e_rss_params_indir_init_uniform(&indir, hp->num_channels);
 	err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
				   mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
-				   indir);
+				   &indir);

-	kvfree(indir);
+	mlx5e_rss_params_indir_cleanup(&indir);
 	return err;
 }
...
@@ -441,8 +441,3 @@ int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int ev

 	return blocking_notifier_call_chain(&events->sw_nh, event, data);
 }
-
-void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work)
-{
-	queue_work(dev->priv.events->wq, work);
-}
@@ -418,12 +418,6 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
 		return -ENOMEM;
 	}

-	priv->rx_res = mlx5e_rx_res_alloc();
-	if (!priv->rx_res) {
-		err = -ENOMEM;
-		goto err_free_fs;
-	}
-
 	mlx5e_create_q_counters(priv);

 	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
@@ -432,12 +426,13 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
 		goto err_destroy_q_counters;
 	}

-	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
-				priv->max_nch, priv->drop_rq.rqn,
-				&priv->channels.params.packet_merge,
-				priv->channels.params.num_channels);
-	if (err)
+	priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn,
+					   &priv->channels.params.packet_merge,
+					   priv->channels.params.num_channels);
+	if (IS_ERR(priv->rx_res)) {
+		err = PTR_ERR(priv->rx_res);
 		goto err_close_drop_rq;
+	}

 	err = mlx5i_create_flow_steering(priv);
 	if (err)
@@ -447,13 +442,11 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
 err_destroy_rx_res:
 	mlx5e_rx_res_destroy(priv->rx_res);
+	priv->rx_res = ERR_PTR(-EINVAL);
 err_close_drop_rq:
 	mlx5e_close_drop_rq(&priv->drop_rq);
 err_destroy_q_counters:
 	mlx5e_destroy_q_counters(priv);
-	mlx5e_rx_res_free(priv->rx_res);
-	priv->rx_res = NULL;
-err_free_fs:
 	mlx5e_fs_cleanup(priv->fs);
 	return err;
 }
@@ -462,10 +455,9 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
 {
 	mlx5i_destroy_flow_steering(priv);
 	mlx5e_rx_res_destroy(priv->rx_res);
+	priv->rx_res = ERR_PTR(-EINVAL);
 	mlx5e_close_drop_rq(&priv->drop_rq);
 	mlx5e_destroy_q_counters(priv);
-	mlx5e_rx_res_free(priv->rx_res);
-	priv->rx_res = NULL;
 	mlx5e_fs_cleanup(priv->fs);
 }
...
@@ -943,6 +943,26 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 	}
 }

+/* The last mdev to unregister will destroy the workqueue before removing the
+ * devcom component, and as all the mdevs use the same devcom component we are
+ * guaranteed that the devcom is valid while the calling work is running.
+ */
+struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev)
+{
+	struct mlx5_devcom_comp_dev *devcom = NULL;
+	int i;
+
+	mutex_lock(&ldev->lock);
+	for (i = 0; i < ldev->ports; i++) {
+		if (ldev->pf[i].dev) {
+			devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
+			break;
+		}
+	}
+	mutex_unlock(&ldev->lock);
+	return devcom;
+}
+
 static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
 {
 	queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
@@ -953,9 +973,14 @@ static void mlx5_do_bond_work(struct work_struct *work)
 	struct delayed_work *delayed_work = to_delayed_work(work);
 	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
					     bond_work);
+	struct mlx5_devcom_comp_dev *devcom;
 	int status;

-	status = mlx5_dev_list_trylock();
+	devcom = mlx5_lag_get_devcom_comp(ldev);
+	if (!devcom)
+		return;
+
+	status = mlx5_devcom_comp_trylock(devcom);
 	if (!status) {
 		mlx5_queue_bond_work(ldev, HZ);
 		return;
@@ -964,14 +989,14 @@ static void mlx5_do_bond_work(struct work_struct *work)
 	mutex_lock(&ldev->lock);
 	if (ldev->mode_changes_in_progress) {
 		mutex_unlock(&ldev->lock);
-		mlx5_dev_list_unlock();
+		mlx5_devcom_comp_unlock(devcom);
 		mlx5_queue_bond_work(ldev, HZ);
 		return;
 	}

 	mlx5_do_bond(ldev);
 	mutex_unlock(&ldev->lock);
-	mlx5_dev_list_unlock();
+	mlx5_devcom_comp_unlock(devcom);
 }

 static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
@@ -1212,13 +1237,14 @@ static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
 	dev->priv.lag = NULL;
 }

-/* Must be called with intf_mutex held */
+/* Must be called with HCA devcom component lock held */
 static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
 {
+	struct mlx5_devcom_comp_dev *pos = NULL;
 	struct mlx5_lag *ldev = NULL;
 	struct mlx5_core_dev *tmp_dev;

-	tmp_dev = mlx5_get_next_phys_dev_lag(dev);
+	tmp_dev = mlx5_devcom_get_next_peer_data(dev->priv.hca_devcom_comp, &pos);
 	if (tmp_dev)
 		ldev = mlx5_lag_dev(tmp_dev);

@@ -1275,10 +1301,13 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
 	if (!mlx5_lag_is_supported(dev))
 		return;

+	if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
+		return;
+
 recheck:
-	mlx5_dev_list_lock();
+	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
 	err = __mlx5_lag_dev_add_mdev(dev);
-	mlx5_dev_list_unlock();
+	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);

 	if (err) {
 		msleep(100);
@@ -1431,7 +1460,7 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
 	if (!ldev)
 		return;

-	mlx5_dev_list_lock();
+	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
 	mutex_lock(&ldev->lock);

 	ldev->mode_changes_in_progress++;
@@ -1439,7 +1468,7 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
 		mlx5_disable_lag(ldev);

 	mutex_unlock(&ldev->lock);
-	mlx5_dev_list_unlock();
+	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
 }

 void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
...
@@ -112,6 +112,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev);
 void mlx5_lag_remove_devices(struct mlx5_lag *ldev);
 int mlx5_deactivate_lag(struct mlx5_lag *ldev);
 void mlx5_lag_add_devices(struct mlx5_lag *ldev);
+struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev);
 
 static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
 {
...
@@ -129,9 +129,14 @@ static void disable_mpesw(struct mlx5_lag *ldev)
 static void mlx5_mpesw_work(struct work_struct *work)
 {
 	struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
+	struct mlx5_devcom_comp_dev *devcom;
 	struct mlx5_lag *ldev = mpesww->lag;
 
-	mlx5_dev_list_lock();
+	devcom = mlx5_lag_get_devcom_comp(ldev);
+	if (!devcom)
+		return;
+
+	mlx5_devcom_comp_lock(devcom);
 	mutex_lock(&ldev->lock);
 	if (ldev->mode_changes_in_progress) {
 		mpesww->result = -EAGAIN;
@@ -144,7 +149,7 @@ static void mlx5_mpesw_work(struct work_struct *work)
 	disable_mpesw(ldev);
 unlock:
 	mutex_unlock(&ldev->lock);
-	mlx5_dev_list_unlock();
+	mlx5_devcom_comp_unlock(devcom);
 	complete(&mpesww->comp);
 }
...
@@ -507,10 +507,7 @@ static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
 	mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
 	port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
-	if (IS_ERR(port_sel->outer.ttc))
-		return PTR_ERR(port_sel->outer.ttc);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(port_sel->outer.ttc);
 }
 
 static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
@@ -521,10 +518,7 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
 	mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
 	port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
-	if (IS_ERR(port_sel->inner.ttc))
-		return PTR_ERR(port_sel->inner.ttc);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(port_sel->inner.ttc);
 }
 
 int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
...
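PTR_ERR_OR_ZERO() collapses the usual IS_ERR()/PTR_ERR() tail into one statement: it returns 0 for a valid pointer and the encoded errno for an ERR_PTR. A minimal sketch (struct bar and make_table are hypothetical names, not driver API):

#include <linux/err.h>

struct bar { void *table; };
void *make_table(void);			/* returns a pointer or ERR_PTR(-errno) */

static int create_table(struct bar *b)
{
	b->table = make_table();
	return PTR_ERR_OR_ZERO(b->table);	/* 0 if valid, -errno if ERR_PTR */
}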
@@ -31,6 +31,7 @@ struct mlx5_devcom_comp {
 	struct kref ref;
 	bool ready;
 	struct rw_semaphore sem;
+	struct lock_class_key lock_key;
 };
 
 struct mlx5_devcom_comp_dev {
@@ -119,6 +120,8 @@ mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler)
 	comp->key = key;
 	comp->handler = handler;
 	init_rwsem(&comp->sem);
+	lockdep_register_key(&comp->lock_key);
+	lockdep_set_class(&comp->sem, &comp->lock_key);
 	kref_init(&comp->ref);
 	INIT_LIST_HEAD(&comp->comp_dev_list_head);
@@ -133,6 +136,7 @@ mlx5_devcom_comp_release(struct kref *ref)
 	mutex_lock(&comp_list_lock);
 	list_del(&comp->comp_list);
 	mutex_unlock(&comp_list_lock);
+	lockdep_unregister_key(&comp->lock_key);
 	kfree(comp);
 }
@@ -383,3 +387,24 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
 	*pos = tmp;
 	return data;
 }
+
+void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom)
+{
+	if (IS_ERR_OR_NULL(devcom))
+		return;
+	down_write(&devcom->comp->sem);
+}
+
+void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom)
+{
+	if (IS_ERR_OR_NULL(devcom))
+		return;
+	up_write(&devcom->comp->sem);
+}
+
+int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom)
+{
+	if (IS_ERR_OR_NULL(devcom))
+		return 0;
+	return down_write_trylock(&devcom->comp->sem);
+}
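Note how all three helpers treat an IS_ERR_OR_NULL component as a degenerate case: lock/unlock become no-ops and trylock reports 0, the same value down_write_trylock() returns when the lock is contended, so callers need no separate "component not registered" branch. A sketch of the intended call pattern, as used by the LAG work above (worker and requeue_later are illustrative names):

static void worker(struct mlx5_lag *ldev)
{
	struct mlx5_devcom_comp_dev *devcom = mlx5_lag_get_devcom_comp(ldev);

	if (!devcom)
		return;			/* no peer device registered yet */
	if (!mlx5_devcom_comp_trylock(devcom)) {
		requeue_later(ldev);	/* busy or unregistered: retry, don't block */
		return;
	}
	/* ... section serialized across all peer devices ... */
	mlx5_devcom_comp_unlock(devcom);
}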
@@ -9,6 +9,7 @@
 enum mlx5_devcom_component {
 	MLX5_DEVCOM_ESW_OFFLOADS,
 	MLX5_DEVCOM_MPV,
+	MLX5_DEVCOM_HCA_PORTS,
 	MLX5_DEVCOM_NUM_COMPONENTS,
 };
@@ -52,4 +53,8 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
 	     data;							\
 	     data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos))
 
+void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom);
+void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom);
+int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom);
+
 #endif /* __LIB_MLX5_DEVCOM_H__ */
@@ -85,7 +85,6 @@ void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
 struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
 struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
 void mlx5_cq_tasklet_cb(struct tasklet_struct *t);
-struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix);
 
 u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
 void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev);
...
@@ -73,6 +73,7 @@
 #include "sf/sf.h"
 #include "mlx5_irq.h"
 #include "hwmon.h"
+#include "lag/lag.h"
 
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
 MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -952,6 +953,27 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev)
 	mlx5_pci_disable_device(dev);
 }
 
+static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev)
+{
+	/* This component is used to sync adding core_dev to lag_dev and to sync
+	 * changes of mlx5_adev_devices between LAG layer and other layers.
+	 */
+	if (!mlx5_lag_is_supported(dev))
+		return;
+
+	dev->priv.hca_devcom_comp =
+		mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS,
+					       mlx5_query_nic_system_image_guid(dev),
+					       NULL, dev);
+	if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
+		mlx5_core_err(dev, "Failed to register devcom HCA component\n");
+}
+
+static void mlx5_unregister_hca_devcom_comp(struct mlx5_core_dev *dev)
+{
+	mlx5_devcom_unregister_component(dev->priv.hca_devcom_comp);
+}
+
 static int mlx5_init_once(struct mlx5_core_dev *dev)
 {
 	int err;
@@ -960,6 +982,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 	if (IS_ERR(dev->priv.devc))
 		mlx5_core_warn(dev, "failed to register devcom device %ld\n",
 			       PTR_ERR(dev->priv.devc));
+	mlx5_register_hca_devcom_comp(dev);
 
 	err = mlx5_query_board_id(dev);
 	if (err) {
@@ -1094,6 +1117,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 err_irq_cleanup:
 	mlx5_irq_table_cleanup(dev);
 err_devcom:
+	mlx5_unregister_hca_devcom_comp(dev);
 	mlx5_devcom_unregister_device(dev->priv.devc);
 
 	return err;
@@ -1123,6 +1147,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
 	mlx5_events_cleanup(dev);
 	mlx5_eq_table_cleanup(dev);
 	mlx5_irq_table_cleanup(dev);
+	mlx5_unregister_hca_devcom_comp(dev);
 	mlx5_devcom_unregister_device(dev->priv.devc);
 }
...
@@ -41,6 +41,7 @@
 #include <linux/mlx5/cq.h>
 #include <linux/mlx5/fs.h>
 #include <linux/mlx5/driver.h>
+#include "lib/devcom.h"
 
 extern uint mlx5_core_debug_mask;
@@ -159,6 +160,8 @@ enum mlx5_semaphore_space_address {
 #define MLX5_DEFAULT_PROF	2
 #define MLX5_SF_PROF		3
+#define MLX5_NUM_FW_CMD_THREADS	8
+#define MLX5_DEV_MAX_WQS	MLX5_NUM_FW_CMD_THREADS
 
 static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed,
				      size_t item_size, size_t num_items,
@@ -264,10 +267,6 @@ int mlx5_register_device(struct mlx5_core_dev *dev);
 void mlx5_unregister_device(struct mlx5_core_dev *dev);
 void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev);
 bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev);
-struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
-void mlx5_dev_list_lock(void);
-void mlx5_dev_list_unlock(void);
-int mlx5_dev_list_trylock(void);
 void mlx5_fw_reporters_create(struct mlx5_core_dev *dev);
 int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
@@ -306,14 +305,12 @@ static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
 {
 	int ret;
 
-	mlx5_dev_list_lock();
+	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
 	ret = mlx5_rescan_drivers_locked(dev);
-	mlx5_dev_list_unlock();
+	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
 	return ret;
 }
 
-void mlx5_lag_update(struct mlx5_core_dev *dev);
-
 enum {
 	MLX5_NIC_IFC_FULL		= 0,
 	MLX5_NIC_IFC_DISABLED		= 1,
@@ -347,7 +344,6 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
 #define mlx5_vport_get_other_func_general_cap(dev, vport, out)		\
	mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)
 
-void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work);
 static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
...
@@ -17,13 +17,19 @@ struct mlx5_sf_dev_table {
 	phys_addr_t base_address;
 	u64 sf_bar_length;
 	struct notifier_block nb;
-	struct mutex table_lock; /* Serializes sf life cycle and vhca state change handler */
 	struct workqueue_struct *active_wq;
 	struct work_struct work;
 	u8 stop_active_wq:1;
 	struct mlx5_core_dev *dev;
 };
 
+struct mlx5_sf_dev_active_work_ctx {
+	struct work_struct work;
+	struct mlx5_vhca_state_event event;
+	struct mlx5_sf_dev_table *table;
+	int sf_index;
+};
+
 static bool mlx5_sf_dev_supported(const struct mlx5_core_dev *dev)
 {
 	return MLX5_CAP_GEN(dev, sf) && mlx5_vhca_event_supported(dev);
@@ -165,7 +171,6 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
 		return 0;
 
 	sf_index = event->function_id - base_id;
-	mutex_lock(&table->table_lock);
 	sf_dev = xa_load(&table->devices, sf_index);
 	switch (event->new_vhca_state) {
 	case MLX5_VHCA_STATE_INVALID:
@@ -189,7 +194,6 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
 	default:
 		break;
 	}
-	mutex_unlock(&table->table_lock);
 	return 0;
 }
@@ -214,15 +218,44 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
 	return 0;
 }
 
-static void mlx5_sf_dev_add_active_work(struct work_struct *work)
+static void mlx5_sf_dev_add_active_work(struct work_struct *_work)
 {
-	struct mlx5_sf_dev_table *table = container_of(work, struct mlx5_sf_dev_table, work);
+	struct mlx5_sf_dev_active_work_ctx *work_ctx;
+
+	work_ctx = container_of(_work, struct mlx5_sf_dev_active_work_ctx, work);
+	if (work_ctx->table->stop_active_wq)
+		goto out;
+	/* Don't probe a device which is already probed. */
+	if (!xa_load(&work_ctx->table->devices, work_ctx->sf_index))
+		mlx5_sf_dev_add(work_ctx->table->dev, work_ctx->sf_index,
+				work_ctx->event.function_id, work_ctx->event.sw_function_id);
+	/* There is a race where the SF can become inactive after the query
+	 * above, e.g. the query returns that the state of the SF is active,
+	 * and after that the eswitch manager sets it to inactive.
+	 * This case cannot be managed in SW, since the probing of the SF is
+	 * on one system and the inactivation is on a different system.
+	 * If the inactivation is done after the SF performs init_hca(), the
+	 * SF will be fully probed and then removed. If it was done before
+	 * init_hca(), the SF probe will fail.
+	 */
+out:
+	kfree(work_ctx);
+}
+
+/* In case SFs are generated externally, probe active SFs */
+static void mlx5_sf_dev_queue_active_works(struct work_struct *_work)
+{
+	struct mlx5_sf_dev_table *table = container_of(_work, struct mlx5_sf_dev_table, work);
 	u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+	struct mlx5_sf_dev_active_work_ctx *work_ctx;
 	struct mlx5_core_dev *dev = table->dev;
 	u16 max_functions;
 	u16 function_id;
 	u16 sw_func_id;
 	int err = 0;
+	int wq_idx;
 	u8 state;
 	int i;
@@ -242,27 +275,22 @@ static void mlx5_sf_dev_add_active_work(struct work_struct *work)
 			continue;
 
 		sw_func_id = MLX5_GET(query_vhca_state_out, out, vhca_state_context.sw_function_id);
-		mutex_lock(&table->table_lock);
-		/* Don't probe device which is already probe */
-		if (!xa_load(&table->devices, i))
-			mlx5_sf_dev_add(dev, i, function_id, sw_func_id);
-		/* There is a race where SF got inactive after the query
-		 * above. e.g.: the query returns that the state of the
-		 * SF is active, and after that the eswitch manager set it to
-		 * inactive.
-		 * This case cannot be managed in SW, since the probing of the
-		 * SF is on one system, and the inactivation is on a different
-		 * system.
-		 * If the inactive is done after the SF perform init_hca(),
-		 * the SF will fully probe and then removed. If it was
-		 * done before init_hca(), the SF probe will fail.
-		 */
-		mutex_unlock(&table->table_lock);
+		work_ctx = kzalloc(sizeof(*work_ctx), GFP_KERNEL);
+		if (!work_ctx)
+			return;
+
+		INIT_WORK(&work_ctx->work, &mlx5_sf_dev_add_active_work);
+		work_ctx->event.function_id = function_id;
+		work_ctx->event.sw_function_id = sw_func_id;
+		work_ctx->table = table;
+		work_ctx->sf_index = i;
+		wq_idx = work_ctx->event.function_id % MLX5_DEV_MAX_WQS;
+		mlx5_vhca_events_work_enqueue(dev, wq_idx, &work_ctx->work);
 	}
 }
 
 /* In case SFs are generated externally, probe active SFs */
-static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table)
+static int mlx5_sf_dev_create_active_works(struct mlx5_sf_dev_table *table)
 {
 	if (MLX5_CAP_GEN(table->dev, eswitch_manager))
 		return 0; /* the table is local */
@@ -273,12 +301,12 @@ static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table)
 	table->active_wq = create_singlethread_workqueue("mlx5_active_sf");
 	if (!table->active_wq)
 		return -ENOMEM;
-	INIT_WORK(&table->work, &mlx5_sf_dev_add_active_work);
+	INIT_WORK(&table->work, &mlx5_sf_dev_queue_active_works);
 	queue_work(table->active_wq, &table->work);
 	return 0;
 }
 
-static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table)
+static void mlx5_sf_dev_destroy_active_works(struct mlx5_sf_dev_table *table)
 {
 	if (table->active_wq) {
 		table->stop_active_wq = true;
@@ -305,14 +333,13 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
 	table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
 	table->base_address = pci_resource_start(dev->pdev, 2);
 	xa_init(&table->devices);
-	mutex_init(&table->table_lock);
 	dev->priv.sf_dev_table = table;
 
 	err = mlx5_vhca_event_notifier_register(dev, &table->nb);
 	if (err)
 		goto vhca_err;
 
-	err = mlx5_sf_dev_queue_active_work(table);
+	err = mlx5_sf_dev_create_active_works(table);
 	if (err)
 		goto add_active_err;
@@ -322,9 +349,10 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
 	return;
 
 arm_err:
-	mlx5_sf_dev_destroy_active_work(table);
+	mlx5_sf_dev_destroy_active_works(table);
add_active_err:
 	mlx5_vhca_event_notifier_unregister(dev, &table->nb);
+	mlx5_vhca_event_work_queues_flush(dev);
vhca_err:
 	kfree(table);
 	dev->priv.sf_dev_table = NULL;
@@ -350,9 +378,9 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
 	if (!table)
 		return;
 
-	mlx5_sf_dev_destroy_active_work(table);
+	mlx5_sf_dev_destroy_active_works(table);
 	mlx5_vhca_event_notifier_unregister(dev, &table->nb);
-	mutex_destroy(&table->table_lock);
+	mlx5_vhca_event_work_queues_flush(dev);
 
 	/* Now that event handler is not running, it is safe to destroy
	 * the sf device without race.
...
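The table_lock removal above hinges on giving each SF its own heap-allocated work context instead of funneling everything through one work item on the shared table: the producer snapshots the event into the context, and the handler, as the last owner, frees it, so producer and consumer need no lock to serialize against each other. A minimal standalone sketch of that ownership pattern (probe_ctx, probe_fn and queue_probe are hypothetical names):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct probe_ctx {
	struct work_struct work;
	u16 function_id;	/* snapshot of the event, valid after the caller returns */
};

static void probe_fn(struct work_struct *w)
{
	struct probe_ctx *ctx = container_of(w, struct probe_ctx, work);

	/* ... probe the function identified by ctx->function_id ... */
	kfree(ctx);		/* the handler is the last owner of the context */
}

static int queue_probe(struct workqueue_struct *wq, u16 function_id)
{
	struct probe_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->function_id = function_id;
	INIT_WORK(&ctx->work, probe_fn);
	queue_work(wq, &ctx->work);
	return 0;
}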
@@ -21,6 +21,15 @@ struct mlx5_vhca_event_work {
 	struct mlx5_vhca_state_event event;
 };
 
+struct mlx5_vhca_event_handler {
+	struct workqueue_struct *wq;
+};
+
+struct mlx5_vhca_events {
+	struct mlx5_core_dev *dev;
+	struct mlx5_vhca_event_handler handler[MLX5_DEV_MAX_WQS];
+};
+
 int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen)
 {
 	u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {};
@@ -99,6 +108,11 @@ static void mlx5_vhca_state_work_handler(struct work_struct *_work)
 	kfree(work);
 }
 
+void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct work_struct *work)
+{
+	queue_work(dev->priv.vhca_events->handler[idx].wq, work);
+}
+
 static int
 mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
 {
@@ -106,6 +120,7 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
 	mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb);
 	struct mlx5_vhca_event_work *work;
 	struct mlx5_eqe *eqe = data;
+	int wq_idx;
 
 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
 	if (!work)
@@ -113,7 +128,8 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
 	INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
 	work->notifier = notifier;
 	work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
-	mlx5_events_work_enqueue(notifier->dev, &work->work);
+	wq_idx = work->event.function_id % MLX5_DEV_MAX_WQS;
+	mlx5_vhca_events_work_enqueue(notifier->dev, wq_idx, &work->work);
 	return NOTIFY_OK;
 }
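The function_id % MLX5_DEV_MAX_WQS dispatch is the heart of the parallelization: each queue is single-threaded, so all events for one function are still handled in order relative to each other, while events for different functions can run on different queues concurrently. A sketch of the invariant, with hypothetical names (dispatch, MAX_WQS standing in for MLX5_DEV_MAX_WQS):

#include <linux/workqueue.h>

#define MAX_WQS 8

/* Same function_id -> same single-threaded queue: per-function ordering is
 * preserved. Different function_ids spread over MAX_WQS queues: parallelism.
 */
static void dispatch(struct workqueue_struct *wqs[MAX_WQS], u16 function_id,
		     struct work_struct *work)
{
	queue_work(wqs[function_id % MAX_WQS], work);
}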
@@ -132,28 +148,75 @@ void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
 int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
 {
 	struct mlx5_vhca_state_notifier *notifier;
+	char wq_name[MLX5_CMD_WQ_MAX_NAME];
+	struct mlx5_vhca_events *events;
+	int err, i;
 
 	if (!mlx5_vhca_event_supported(dev))
 		return 0;
 
-	notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
-	if (!notifier)
+	events = kzalloc(sizeof(*events), GFP_KERNEL);
+	if (!events)
 		return -ENOMEM;
 
+	events->dev = dev;
+	dev->priv.vhca_events = events;
+	for (i = 0; i < MLX5_DEV_MAX_WQS; i++) {
+		snprintf(wq_name, MLX5_CMD_WQ_MAX_NAME, "mlx5_vhca_event%d", i);
+		events->handler[i].wq = create_singlethread_workqueue(wq_name);
+		if (!events->handler[i].wq) {
+			err = -ENOMEM;
+			goto err_create_wq;
+		}
+	}
+
+	notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+	if (!notifier) {
+		err = -ENOMEM;
+		goto err_notifier;
+	}
+
 	dev->priv.vhca_state_notifier = notifier;
 	notifier->dev = dev;
 	BLOCKING_INIT_NOTIFIER_HEAD(&notifier->n_head);
 	MLX5_NB_INIT(&notifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE);
 	return 0;
+
+err_notifier:
+err_create_wq:
+	for (--i; i >= 0; i--)
+		destroy_workqueue(events->handler[i].wq);
+	kfree(events);
+	return err;
+}
+
+void mlx5_vhca_event_work_queues_flush(struct mlx5_core_dev *dev)
+{
+	struct mlx5_vhca_events *vhca_events;
+	int i;
+
+	if (!mlx5_vhca_event_supported(dev))
+		return;
+
+	vhca_events = dev->priv.vhca_events;
+	for (i = 0; i < MLX5_DEV_MAX_WQS; i++)
+		flush_workqueue(vhca_events->handler[i].wq);
 }
 
 void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
 {
+	struct mlx5_vhca_events *vhca_events;
+	int i;
+
 	if (!mlx5_vhca_event_supported(dev))
 		return;
 
 	kfree(dev->priv.vhca_state_notifier);
 	dev->priv.vhca_state_notifier = NULL;
+
+	vhca_events = dev->priv.vhca_events;
+	for (i = 0; i < MLX5_DEV_MAX_WQS; i++)
+		destroy_workqueue(vhca_events->handler[i].wq);
+	kvfree(vhca_events);
 }
 
 void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
...
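Two details of the queue-pool lifecycle above are worth noting. First, the for (--i; i >= 0; i--) unwind destroys only the queues that were actually created before the failure. Second, mlx5_vhca_event_work_queues_flush() lets callers drain every shard after unregistering the notifier and before freeing the objects the queued works touch. A condensed, standalone sketch of both (NWQ and all names are hypothetical):

#include <linux/workqueue.h>

#define NWQ 8
static struct workqueue_struct *wqs[NWQ];

static int pool_init(void)
{
	int i;

	for (i = 0; i < NWQ; i++) {
		wqs[i] = create_singlethread_workqueue("demo_wq");
		if (!wqs[i])
			goto err;		/* queues [0, i) exist, wqs[i] does not */
	}
	return 0;

err:
	for (--i; i >= 0; i--)			/* unwind only what was created */
		destroy_workqueue(wqs[i]);
	return -ENOMEM;
}

static void pool_flush(void)
{
	int i;

	for (i = 0; i < NWQ; i++)		/* returns once in-flight works finish */
		flush_workqueue(wqs[i]);
}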
@@ -28,6 +28,9 @@ int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn
 int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id);
 int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
			      u32 *out, u32 outlen);
+void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct work_struct *work);
+void mlx5_vhca_event_work_queues_flush(struct mlx5_core_dev *dev);
+
 #else
 
 static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
...
@@ -436,10 +436,6 @@ void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   bool inner, bool rx);
-void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
-			       struct mlx5dr_ste_build *sb,
-			       struct mlx5dr_match_param *mask,
-			       bool inner, bool rx);
 void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
					struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask,
...
@@ -615,6 +615,7 @@ struct mlx5_priv {
 	int			adev_idx;
 	int			sw_vhca_id;
 	struct mlx5_events	*events;
+	struct mlx5_vhca_events	*vhca_events;
 	struct mlx5_flow_steering *steering;
 	struct mlx5_mpfs	*mpfs;
@@ -623,6 +624,7 @@ struct mlx5_priv {
 	struct mlx5_lag		*lag;
 	u32			flags;
 	struct mlx5_devcom_dev	*devc;
+	struct mlx5_devcom_comp_dev *hca_devcom_comp;
 	struct mlx5_fw_reset	*fw_reset;
 	struct mlx5_core_roce	roce;
 	struct mlx5_fc_stats	fc_stats;
@@ -1041,10 +1043,6 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
 int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
 void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
-struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
-						      gfp_t flags, int npages);
-void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
-				 struct mlx5_cmd_mailbox *head);
 int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
			  int inlen);
 int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey);
@@ -1058,8 +1056,6 @@ void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
 void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev);
-void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-				 s32 npages, bool ec_function);
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
@@ -1099,8 +1095,6 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
 int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
 __be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev);
 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
-int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
-			struct mlx5_odp_caps *odp_caps);
 int mlx5_init_rl_table(struct mlx5_core_dev *dev);
 void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
@@ -1202,12 +1196,6 @@ int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
 void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
					     int vf_id,
					     struct notifier_block *nb);
-#ifdef CONFIG_MLX5_CORE_IPOIB
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
-					  struct ib_device *ibdev,
-					  const char *name,
-					  void (*setup)(struct net_device *));
-#endif /* CONFIG_MLX5_CORE_IPOIB */
 int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
			    struct ib_device *device,
			    struct rdma_netdev_alloc_params *params);
...