Commit 549da338 authored by David S. Miller

Merge tag 'mlx5-updates-2020-02-27' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-02-27

mlx5 misc updates and minor cleanups:

1) Use per vport tables for mirroring
2) Improve log messages for SW steering (DR)
3) Add devlink fdb_large_groups parameter
4) E-Switch, Allow goto earlier chain
5) Don't allow forwarding between uplink representors
6) Add support for devlink-port in non-representors mode
7) Minor misc cleanups
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9f6e0559 bc1d75fa
@@ -37,6 +37,12 @@ parameters.
   * ``smfs`` Software managed flow steering. In SMFS mode, the HW
     steering entities are created and manage through the driver without
     firmware intervention.
 
+  * - ``fdb_large_groups``
+    - u32
+    - driverinit
+    - Control the number of large groups (size > 1) in the FDB table.
+
+      * The default value is 15, and the range is between 1 and 1024.
 
 The ``mlx5`` driver supports reloading via ``DEVLINK_CMD_RELOAD``
......
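As background for the documentation above: a ``driverinit`` parameter takes effect on the next devlink reload, and the driver reads the value once during initialization. A minimal sketch of that read-with-fallback pattern follows; MY_PARAM_ID and MY_DEFAULT_GROUPS are placeholder names, not mlx5 symbols.

	/* Sketch: consuming a u32 driverinit devlink parameter with a
	 * fallback default. MY_PARAM_ID and MY_DEFAULT_GROUPS are
	 * placeholders, not part of this patch set.
	 */
	static u32 my_get_large_group_num(struct devlink *devlink)
	{
		union devlink_param_value val;
		int err;

		err = devlink_param_driverinit_value_get(devlink,
							 MY_PARAM_ID, &val);
		if (err)
			return MY_DEFAULT_GROUPS; /* no value configured */

		return val.vu32;
	}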
@@ -25,7 +25,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
 		en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
 		en_selftest.o en/port.o en/monitor_stats.o en/health.o \
 		en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/umem.o \
-		en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o
+		en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o
 
 #
 # Netdev extra
......
@@ -190,11 +190,6 @@ static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id,
 	return 0;
 }
 
-enum mlx5_devlink_param_id {
-	MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
-	MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
-};
-
 static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
 					     union devlink_param_value val,
 					     struct netlink_ext_ack *extack)
@@ -210,14 +205,38 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
 	return 0;
 }
 
+#ifdef CONFIG_MLX5_ESWITCH
+static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id,
+						 union devlink_param_value val,
+						 struct netlink_ext_ack *extack)
+{
+	int group_num = val.vu32;
+
+	if (group_num < 1 || group_num > 1024) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Unsupported group number, supported range is 1-1024");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+#endif
+
 static const struct devlink_param mlx5_devlink_params[] = {
-	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
+	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
 			     "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
 			     mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
 			     mlx5_devlink_fs_mode_validate),
 	DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
 			      NULL, NULL, mlx5_devlink_enable_roce_validate),
+#ifdef CONFIG_MLX5_ESWITCH
+	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+			     "fdb_large_groups", DEVLINK_PARAM_TYPE_U32,
+			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+			     NULL, NULL,
+			     mlx5_devlink_large_group_num_validate),
+#endif
 };
 
 static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
@@ -230,13 +249,20 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
 	else
 		strcpy(value.vstr, "smfs");
 	devlink_param_driverinit_value_set(devlink,
-					   MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
+					   MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
 					   value);
 
 	value.vbool = MLX5_CAP_GEN(dev, roce);
 	devlink_param_driverinit_value_set(devlink,
 					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
 					   value);
+
+#ifdef CONFIG_MLX5_ESWITCH
+	value.vu32 = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
+	devlink_param_driverinit_value_set(devlink,
+					   MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+					   value);
+#endif
 }
 
 int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
......
@@ -6,6 +6,12 @@
 
 #include <net/devlink.h>
 
+enum mlx5_devlink_param_id {
+	MLX5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+	MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
+	MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+};
+
 struct devlink *mlx5_devlink_alloc(void);
 void mlx5_devlink_free(struct devlink *devlink);
 int mlx5_devlink_register(struct devlink *devlink, struct device *dev);
......
@@ -880,6 +880,7 @@ struct mlx5e_priv {
 #endif
 	struct devlink_health_reporter *tx_reporter;
 	struct devlink_health_reporter *rx_reporter;
+	struct devlink_port            dl_phy_port;
 	struct mlx5e_xsk           xsk;
 #if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
 	struct mlx5e_hv_vhca_stats_agent stats_agent;
......
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include "en/devlink.h"
+
+int mlx5e_devlink_phy_port_register(struct net_device *dev)
+{
+	struct mlx5e_priv *priv;
+	struct devlink *devlink;
+	int err;
+
+	priv = netdev_priv(dev);
+	devlink = priv_to_devlink(priv->mdev);
+	devlink_port_attrs_set(&priv->dl_phy_port,
+			       DEVLINK_PORT_FLAVOUR_PHYSICAL,
+			       PCI_FUNC(priv->mdev->pdev->devfn),
+			       false, 0,
+			       NULL, 0);
+	err = devlink_port_register(devlink, &priv->dl_phy_port, 1);
+	if (err)
+		return err;
+	devlink_port_type_eth_set(&priv->dl_phy_port, dev);
+	return 0;
+}
+
+void mlx5e_devlink_phy_port_unregister(struct mlx5e_priv *priv)
+{
+	devlink_port_unregister(&priv->dl_phy_port);
+}
+
+struct devlink_port *mlx5e_get_devlink_phy_port(struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	return &priv->dl_phy_port;
+}
......
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5E_EN_DEVLINK_H
+#define __MLX5E_EN_DEVLINK_H
+
+#include <net/devlink.h>
+#include "en.h"
+
+int mlx5e_devlink_phy_port_register(struct net_device *dev);
+void mlx5e_devlink_phy_port_unregister(struct mlx5e_priv *priv);
+struct devlink_port *mlx5e_get_devlink_phy_port(struct net_device *dev);
+
+#endif
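The two new files above register a devlink port of flavour ``physical`` for the PF netdev itself, so a port entry exists even when the device is not in switchdev (representor) mode. The core resolves a netdev to its devlink port through the ``ndo_get_devlink_port`` callback wired up in the next hunk, roughly as in this simplified sketch (an assumption about the core's behavior, not code from this patch):

	/* Simplified sketch of how the networking core maps a netdev to
	 * its devlink port via the callback; details of the real net/core
	 * implementation are elided.
	 */
	static struct devlink_port *lookup_devlink_port(struct net_device *dev)
	{
		const struct net_device_ops *ops = dev->netdev_ops;

		if (!ops->ndo_get_devlink_port)
			return NULL;
		return ops->ndo_get_devlink_port(dev);
	}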
@@ -63,6 +63,7 @@
 #include "en/xsk/rx.h"
 #include "en/xsk/tx.h"
 #include "en/hv_vhca_stats.h"
+#include "en/devlink.h"
 #include "lib/mlx5.h"
 
@@ -4605,6 +4606,7 @@ const struct net_device_ops mlx5e_netdev_ops = {
 	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
 	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
 #endif
+	.ndo_get_devlink_port    = mlx5e_get_devlink_phy_port,
 };
 
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -5471,11 +5473,19 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
 		goto err_detach;
 	}
 
+	err = mlx5e_devlink_phy_port_register(netdev);
+	if (err) {
+		mlx5_core_err(mdev, "mlx5e_devlink_phy_port_register failed, %d\n", err);
+		goto err_unregister_netdev;
+	}
+
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 	mlx5e_dcbnl_init_app(priv);
 #endif
 
 	return priv;
 
+err_unregister_netdev:
+	unregister_netdev(netdev);
 err_detach:
 	mlx5e_detach(mdev, priv);
 err_destroy_netdev:
@@ -5497,6 +5507,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 	mlx5e_dcbnl_delete_app(priv);
 #endif
+	mlx5e_devlink_phy_port_unregister(priv);
 	unregister_netdev(priv->netdev);
 	mlx5e_detach(mdev, vpriv);
 	mlx5e_destroy_netdev(priv);
......
@@ -192,7 +192,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
 	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
 	if (err) {
-		pr_warn("vport %d error %d reading stats\n", rep->vport, err);
+		netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
+			    rep->vport, err);
 		return;
 	}
 
@@ -1422,7 +1423,7 @@ static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan
 	return 0;
 }
 
-static struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
+static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 
@@ -1435,7 +1436,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
 	.ndo_stop                = mlx5e_rep_close,
 	.ndo_start_xmit          = mlx5e_xmit,
 	.ndo_setup_tc            = mlx5e_rep_setup_tc,
-	.ndo_get_devlink_port    = mlx5e_get_devlink_port,
+	.ndo_get_devlink_port    = mlx5e_rep_get_devlink_port,
 	.ndo_get_stats64         = mlx5e_rep_get_stats,
 	.ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
 	.ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
@@ -1448,7 +1449,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
 	.ndo_start_xmit          = mlx5e_xmit,
 	.ndo_set_mac_address     = mlx5e_uplink_rep_set_mac,
 	.ndo_setup_tc            = mlx5e_rep_setup_tc,
-	.ndo_get_devlink_port    = mlx5e_get_devlink_port,
+	.ndo_get_devlink_port    = mlx5e_rep_get_devlink_port,
 	.ndo_get_stats64         = mlx5e_get_stats,
 	.ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
 	.ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
@@ -1464,6 +1465,11 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
 	.ndo_set_features        = mlx5e_set_features,
 };
 
+bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
+{
+	return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep;
+}
+
 bool mlx5e_eswitch_rep(struct net_device *netdev)
 {
 	if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
@@ -2026,8 +2032,9 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 		       &mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
 	netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
 	if (!netdev) {
-		pr_warn("Failed to create representor netdev for vport %d\n",
-			rep->vport);
+		mlx5_core_warn(dev,
+			       "Failed to create representor netdev for vport %d\n",
+			       rep->vport);
 		kfree(rpriv);
 		return -EINVAL;
 	}
@@ -2045,29 +2052,32 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	err = mlx5e_attach_netdev(netdev_priv(netdev));
 	if (err) {
-		pr_warn("Failed to attach representor netdev for vport %d\n",
-			rep->vport);
+		netdev_warn(netdev,
+			    "Failed to attach representor netdev for vport %d\n",
+			    rep->vport);
 		goto err_destroy_mdev_resources;
 	}
 
 	err = mlx5e_rep_neigh_init(rpriv);
 	if (err) {
-		pr_warn("Failed to initialized neighbours handling for vport %d\n",
-			rep->vport);
+		netdev_warn(netdev,
+			    "Failed to initialized neighbours handling for vport %d\n",
+			    rep->vport);
 		goto err_detach_netdev;
 	}
 
 	err = register_devlink_port(dev, rpriv);
 	if (err) {
-		esw_warn(dev, "Failed to register devlink port %d\n",
-			 rep->vport);
+		netdev_warn(netdev, "Failed to register devlink port %d\n",
+			    rep->vport);
 		goto err_neigh_cleanup;
 	}
 
 	err = register_netdev(netdev);
 	if (err) {
-		pr_warn("Failed to register representor netdev for vport %d\n",
-			rep->vport);
+		netdev_warn(netdev,
+			    "Failed to register representor netdev for vport %d\n",
+			    rep->vport);
 		goto err_devlink_cleanup;
 	}
 
......
@@ -200,6 +200,7 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
 void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
 
 bool mlx5e_eswitch_rep(struct net_device *netdev);
+bool mlx5e_eswitch_uplink_rep(struct net_device *netdev);
 
 #else /* CONFIG_MLX5_ESWITCH */
 static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
......
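mlx5e_eswitch_uplink_rep() gives the TC offload path a cheap way to tell uplink representors apart from VF representors, which is what item 5 of the cover letter ("Don't allow forwarding between uplink representors") relies on. A hedged sketch of such a check; the helper name and placement are illustrative only, and the actual enforcement lives in TC parsing code not shown in this excerpt:

	/* Illustrative only: reject a redirect destination when both the
	 * source and destination netdevs are uplink representors.
	 */
	static bool fwd_between_uplinks(struct net_device *in_dev,
					struct net_device *out_dev)
	{
		return mlx5e_eswitch_uplink_rep(in_dev) &&
		       mlx5e_eswitch_uplink_rep(out_dev);
	}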
@@ -39,6 +39,7 @@
 #include "lib/eq.h"
 #include "eswitch.h"
 #include "fs_core.h"
+#include "devlink.h"
 #include "ecpf.h"
 
 enum {
@@ -2006,6 +2007,25 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
 		esw_disable_vport(esw, vport);
 }
 
+static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
+{
+	struct devlink *devlink = priv_to_devlink(esw->dev);
+	union devlink_param_value val;
+	int err;
+
+	err = devlink_param_driverinit_value_get(devlink,
+						 MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+						 &val);
+	if (!err) {
+		esw->params.large_group_num = val.vu32;
+	} else {
+		esw_warn(esw->dev,
+			 "Devlink can't get param fdb_large_groups, uses default (%d).\n",
+			 ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
+		esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
+	}
+}
+
 int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
 {
 	int err;
@@ -2022,6 +2042,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
 	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
 		esw_warn(esw->dev, "engress ACL is not supported by FW\n");
 
+	mlx5_eswitch_get_devlink_param(esw);
+
 	esw_create_tsar(esw);
 
 	esw->mode = mode;
......
@@ -55,6 +55,8 @@
 
 #ifdef CONFIG_MLX5_ESWITCH
 
+#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15
+
 #define MLX5_MAX_UC_PER_VPORT(dev) \
 	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))
 
@@ -183,6 +185,12 @@ struct mlx5_eswitch_fdb {
 			int vlan_push_pop_refcount;
 
 			struct mlx5_esw_chains_priv *esw_chains_priv;
+			struct {
+				DECLARE_HASHTABLE(table, 8);
+				/* Protects vports.table */
+				struct mutex lock;
+			} vports;
 		} offloads;
 	};
 	u32 flags;
@@ -255,6 +263,9 @@ struct mlx5_eswitch {
 	u16 manager_vport;
 	u16 first_host_vport;
 	struct mlx5_esw_functions esw_funcs;
+	struct {
+		u32 large_group_num;
+	} params;
 };
 
 void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -623,6 +634,9 @@ void
 esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
 				      struct mlx5_vport *vport);
 
+int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw);
+void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw);
+
 #else  /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
......
@@ -50,6 +50,181 @@
 #define MLX5_ESW_MISS_FLOWS (2)
 #define UPLINK_REP_INDEX 0
 
+/* Per vport tables */
+
+#define MLX5_ESW_VPORT_TABLE_SIZE 128
+
+/* This struct is used as a key to the hash table and we need it to be packed
+ * so hash result is consistent
+ */
+struct mlx5_vport_key {
+	u32 chain;
+	u16 prio;
+	u16 vport;
+	u16 vhca_id;
+} __packed;
+
+struct mlx5_vport_table {
+	struct hlist_node hlist;
+	struct mlx5_flow_table *fdb;
+	u32 num_rules;
+	struct mlx5_vport_key key;
+};
+
+#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4
+
+static struct mlx5_flow_table *
+esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
+{
+	struct mlx5_flow_table_attr ft_attr = {};
+	struct mlx5_flow_table *fdb;
+
+	ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
+	ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
+	ft_attr.prio = FDB_PER_VPORT;
+	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+	if (IS_ERR(fdb)) {
+		esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
+			 PTR_ERR(fdb));
+	}
+
+	return fdb;
+}
+
+static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
+				  struct mlx5_esw_flow_attr *attr,
+				  struct mlx5_vport_key *key)
+{
+	key->vport = attr->in_rep->vport;
+	key->chain = attr->chain;
+	key->prio = attr->prio;
+	key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+	return jhash(key, sizeof(*key), 0);
+}
+
+/* caller must hold vports.lock */
+static struct mlx5_vport_table *
+esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
+{
+	struct mlx5_vport_table *e;
+
+	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
+		if (!memcmp(&e->key, skey, sizeof(*skey)))
+			return e;
+
+	return NULL;
+}
+
+static void
+esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+{
+	struct mlx5_vport_table *e;
+	struct mlx5_vport_key key;
+	u32 hkey;
+
+	mutex_lock(&esw->fdb_table.offloads.vports.lock);
+	hkey = flow_attr_to_vport_key(esw, attr, &key);
+	e = esw_vport_tbl_lookup(esw, &key, hkey);
+	if (!e || --e->num_rules)
+		goto out;
+
+	hash_del(&e->hlist);
+	mlx5_destroy_flow_table(e->fdb);
+	kfree(e);
+out:
+	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+}
+
+static struct mlx5_flow_table *
+esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+{
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_flow_namespace *ns;
+	struct mlx5_flow_table *fdb;
+	struct mlx5_vport_table *e;
+	struct mlx5_vport_key skey;
+	u32 hkey;
+
+	mutex_lock(&esw->fdb_table.offloads.vports.lock);
+	hkey = flow_attr_to_vport_key(esw, attr, &skey);
+	e = esw_vport_tbl_lookup(esw, &skey, hkey);
+	if (e) {
+		e->num_rules++;
+		goto out;
+	}
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (!e) {
+		fdb = ERR_PTR(-ENOMEM);
+		goto err_alloc;
+	}
+
+	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+	if (!ns) {
+		esw_warn(dev, "Failed to get FDB namespace\n");
+		fdb = ERR_PTR(-ENOENT);
+		goto err_ns;
+	}
+
+	fdb = esw_vport_tbl_create(esw, ns);
+	if (IS_ERR(fdb))
+		goto err_ns;
+
+	e->fdb = fdb;
+	e->num_rules = 1;
+	e->key = skey;
+	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
+out:
+	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+	return e->fdb;
+
+err_ns:
+	kfree(e);
+err_alloc:
+	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
+	return fdb;
+}
+
+int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
+{
+	struct mlx5_esw_flow_attr attr = {};
+	struct mlx5_eswitch_rep rep = {};
+	struct mlx5_flow_table *fdb;
+	struct mlx5_vport *vport;
+	int i;
+
+	attr.prio = 1;
+	attr.in_rep = &rep;
+	mlx5_esw_for_all_vports(esw, i, vport) {
+		attr.in_rep->vport = vport->vport;
+		fdb = esw_vport_tbl_get(esw, &attr);
+		if (IS_ERR(fdb))
+			goto out;
+	}
+
+	return 0;
+
+out:
+	mlx5_esw_vport_tbl_put(esw);
+	return PTR_ERR(fdb);
+}
+
+void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
+{
+	struct mlx5_esw_flow_attr attr = {};
+	struct mlx5_eswitch_rep rep = {};
+	struct mlx5_vport *vport;
+	int i;
+
+	attr.prio = 1;
+	attr.in_rep = &rep;
+	mlx5_esw_for_all_vports(esw, i, vport) {
+		attr.in_rep->vport = vport->vport;
+		esw_vport_tbl_put(esw, &attr);
+	}
+}
+
+/* End: Per vport tables */
+
 static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
 						     u16 vport_num)
 {
@@ -191,8 +366,6 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 		i++;
 	}
 
-	mlx5_eswitch_set_rule_source_port(esw, spec, attr);
-
 	if (attr->outer_match_level != MLX5_MATCH_NONE)
 		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
 	if (attr->inner_match_level != MLX5_MATCH_NONE)
@@ -201,8 +374,13 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		flow_act.modify_hdr = attr->modify_hdr;
 
-	fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
-					!!split);
+	if (split) {
+		fdb = esw_vport_tbl_get(esw, attr);
+	} else {
+		fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio,
+						0);
+		mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+	}
 	if (IS_ERR(fdb)) {
 		rule = ERR_CAST(fdb);
 		goto err_esw_get;
@@ -221,7 +399,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	return rule;
 
 err_add_rule:
-	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, !!split);
+	if (split)
+		esw_vport_tbl_put(esw, attr);
+	else
+		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
 err_esw_get:
 	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
 		mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
@@ -247,7 +428,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 		goto err_get_fast;
 	}
 
-	fwd_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 1);
+	fwd_fdb = esw_vport_tbl_get(esw, attr);
 	if (IS_ERR(fwd_fdb)) {
 		rule = ERR_CAST(fwd_fdb);
 		goto err_get_fwd;
@@ -285,7 +466,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 	return rule;
 add_err:
-	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+	esw_vport_tbl_put(esw, attr);
err_get_fwd:
 	mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
@@ -312,11 +493,14 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
 	atomic64_dec(&esw->offloads.num_flows);
 
 	if (fwd_rule) {
-		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 1);
+		esw_vport_tbl_put(esw, attr);
 		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
 	} else {
-		mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
-					  !!split);
+		if (split)
+			esw_vport_tbl_put(esw, attr);
+		else
+			mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
+						  0);
 		if (attr->dest_chain)
 			mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
 	}
@@ -1923,6 +2107,9 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
 	if (err)
 		goto create_fg_err;
 
+	mutex_init(&esw->fdb_table.offloads.vports.lock);
+	hash_init(esw->fdb_table.offloads.vports.table);
+
 	return 0;
 
 create_fg_err:
@@ -1939,6 +2126,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
 
 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
 {
+	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
 	esw_destroy_vport_rx_group(esw);
 	esw_destroy_offloads_table(esw);
 	esw_destroy_offloads_fdb_tables(esw);
......
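One detail worth spelling out from the hunk above: struct mlx5_vport_key is hashed with jhash() over its raw bytes, which is why it is declared __packed. Without that, the compiler would round the struct up to 4-byte alignment with tail padding, and those uninitialized padding bytes would be hashed along with the fields, so two logically equal keys could hash differently. A standalone illustration of the invariant (sketch, not mlx5 code):

	#include <linux/jhash.h>

	/* Packed key: sizeof(*key) covers only meaningful bytes, so the
	 * hash is a pure function of the field values.
	 */
	struct demo_key {
		u32 chain;
		u16 prio;
		u16 vport;
		u16 vhca_id;
	} __packed;

	static u32 demo_key_hash(const struct demo_key *key)
	{
		return jhash(key, sizeof(*key), 0);
	}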
@@ -21,8 +21,6 @@
 #define fdb_ignore_flow_level_supported(esw) \
 	(MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
 
-#define ESW_OFFLOADS_NUM_GROUPS  4
-
 /* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS),
  * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
  * for each flow table pool. We can allocate up to 16M of each pool,
@@ -99,6 +97,11 @@ bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw)
 	return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
 }
 
+bool mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw)
+{
+	return fdb_ignore_flow_level_supported(esw);
+}
+
 u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw)
 {
 	if (!mlx5_esw_chains_prios_supported(esw))
@@ -234,7 +237,7 @@ mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
 	}
 
 	ft_attr.autogroup.num_reserved_entries = 2;
-	ft_attr.autogroup.max_num_groups = ESW_OFFLOADS_NUM_GROUPS;
+	ft_attr.autogroup.max_num_groups = esw->params.large_group_num;
 	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
 	if (IS_ERR(fdb)) {
 		esw_warn(esw->dev,
@@ -637,7 +640,7 @@ mlx5_esw_chains_init(struct mlx5_eswitch *esw)
 	esw_debug(dev,
 		  "Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n",
-		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, fdb_max);
+		  max_flow_counter, esw->params.large_group_num, fdb_max);
 
 	mlx5_esw_chains_init_sz_pool(esw);
 
@@ -704,12 +707,9 @@ mlx5_esw_chains_open(struct mlx5_eswitch *esw)
 	/* Open level 1 for split rules now if prios isn't supported  */
 	if (!mlx5_esw_chains_prios_supported(esw)) {
-		ft = mlx5_esw_chains_get_table(esw, 0, 1, 1);
-
-		if (IS_ERR(ft)) {
-			err = PTR_ERR(ft);
+		err = mlx5_esw_vport_tbl_get(esw);
+		if (err)
 			goto level_1_err;
-		}
 	}
 
 	return 0;
@@ -725,7 +725,7 @@ static void
 mlx5_esw_chains_close(struct mlx5_eswitch *esw)
 {
 	if (!mlx5_esw_chains_prios_supported(esw))
-		mlx5_esw_chains_put_table(esw, 0, 1, 1);
+		mlx5_esw_vport_tbl_put(esw);
 	mlx5_esw_chains_put_table(esw, 0, 1, 0);
 	mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
 }
......
@@ -6,6 +6,8 @@
 
 bool
 mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
+bool
+mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw);
 u32
 mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw);
 u32
......
@@ -181,7 +181,7 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
 static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
 						const struct mlx5_flow_spec *spec)
 {
-	u32 port_mask, port_value;
+	u16 port_mask, port_value;
 
 	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
 		return spec->flow_context.flow_source ==
@@ -191,7 +191,7 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
 			     misc_parameters.source_port);
 	port_value = MLX5_GET(fte_match_param, spec->match_value,
 			      misc_parameters.source_port);
-	return (port_mask & port_value & 0xffff) == MLX5_VPORT_UPLINK;
+	return (port_mask & port_value) == MLX5_VPORT_UPLINK;
 }
 
 bool
......
@@ -2700,6 +2700,17 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 		goto out_err;
 	}
 
+	/* We put this priority last, knowing that nothing will get here
+	 * unless explicitly forwarded to. This is possible because the
+	 * slow path tables have catch all rules and nothing gets passed
+	 * those tables.
+	 */
+	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
+	if (IS_ERR(maj_prio)) {
+		err = PTR_ERR(maj_prio);
+		goto out_err;
+	}
+
 	set_prio_attrs(steering->fdb_root_ns);
 	return 0;
 
......
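The resulting FDB priority layout can be read off the fs_core.h enum at the end of this series. A summary sketch (comment only; the names come from the enum, the one-line notes are interpretation):

	/* FDB flow-table priorities after this series, in lookup order
	 * (earlier enum entries elided, as in the hunk above):
	 *
	 *   FDB_TC_OFFLOAD
	 *   FDB_FT_OFFLOAD
	 *   FDB_SLOW_PATH   - holds the catch-all rules
	 *   FDB_PER_VPORT   - new per-vport tables; reachable only via an
	 *                     explicit forward, since the slow-path
	 *                     catch-all matches everything else first
	 */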
@@ -672,7 +672,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
 			dest_action = action;
 			if (!action->dest_tbl.is_fw_tbl) {
 				if (action->dest_tbl.tbl->dmn != dmn) {
-					mlx5dr_dbg(dmn,
+					mlx5dr_err(dmn,
 						   "Destination table belongs to a different domain\n");
 					goto out_invalid_arg;
 				}
@@ -703,7 +703,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
 					action->dest_tbl.fw_tbl.rx_icm_addr =
 						output.sw_owner_icm_root_0;
 				} else {
-					mlx5dr_dbg(dmn,
+					mlx5dr_err(dmn,
 						   "Failed mlx5_cmd_query_flow_table ret: %d\n",
 						   ret);
 					return ret;
@@ -772,7 +772,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
 
 		/* Check action duplication */
 		if (++action_type_set[action_type] > max_actions_type) {
-			mlx5dr_dbg(dmn, "Action type %d supports only max %d time(s)\n",
+			mlx5dr_err(dmn, "Action type %d supports only max %d time(s)\n",
 				   action_type, max_actions_type);
 			goto out_invalid_arg;
 		}
@@ -781,7 +781,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
 		if (dr_action_validate_and_get_next_state(action_domain,
 							  action_type,
 							  &state)) {
-			mlx5dr_dbg(dmn, "Invalid action sequence provided\n");
+			mlx5dr_err(dmn, "Invalid action sequence provided\n");
 			return -EOPNOTSUPP;
 		}
 	}
@@ -797,7 +797,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
 	    rx_rule && recalc_cs_required && dest_action) {
 		ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
 		if (ret) {
-			mlx5dr_dbg(dmn,
+			mlx5dr_err(dmn,
 				   "Failed to handle checksum recalculation err %d\n",
 				   ret);
 			return ret;
......
@@ -59,7 +59,7 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
 
 	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
 	if (ret) {
-		mlx5dr_dbg(dmn, "Couldn't allocate PD\n");
+		mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
 		return ret;
 	}
 
@@ -192,7 +192,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
 
 	ret = dr_domain_query_vports(dmn);
 	if (ret) {
-		mlx5dr_dbg(dmn, "Failed to query vports caps\n");
+		mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret);
 		goto free_vports_caps;
 	}
 
@@ -213,7 +213,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
 	int ret;
 
 	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
-		mlx5dr_dbg(dmn, "Failed to allocate domain, bad link type\n");
+		mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
 		return -EOPNOTSUPP;
 	}
 
@@ -257,7 +257,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
 		dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
 		vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
 		if (!vport_cap) {
-			mlx5dr_dbg(dmn, "Failed to get esw manager vport\n");
+			mlx5dr_err(dmn, "Failed to get esw manager vport\n");
 			return -ENOENT;
 		}
 
@@ -268,7 +268,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
 		dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
 		break;
 	default:
-		mlx5dr_dbg(dmn, "Invalid domain\n");
+		mlx5dr_err(dmn, "Invalid domain\n");
 		ret = -EINVAL;
 		break;
 	}
@@ -300,7 +300,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
 	mutex_init(&dmn->mutex);
 
 	if (dr_domain_caps_init(mdev, dmn)) {
-		mlx5dr_dbg(dmn, "Failed init domain, no caps\n");
+		mlx5dr_err(dmn, "Failed init domain, no caps\n");
 		goto free_domain;
 	}
 
@@ -348,8 +348,11 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
 		mutex_lock(&dmn->mutex);
 		ret = mlx5dr_send_ring_force_drain(dmn);
 		mutex_unlock(&dmn->mutex);
-		if (ret)
+		if (ret) {
+			mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
+				   flags, ret);
 			return ret;
+		}
 	}
 
 	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
......
@@ -468,7 +468,7 @@ mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
 		err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
 		if (err) {
 			dr_icm_chill_buckets_abort(pool, bucket, buckets);
-			mlx5dr_dbg(pool->dmn, "Sync_steering failed\n");
+			mlx5dr_err(pool->dmn, "Sync_steering failed\n");
 			chunk = NULL;
 			goto out;
 		}
......
@@ -388,14 +388,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
 		mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);
 
 	if (idx == 0) {
-		mlx5dr_dbg(dmn, "Cannot generate any valid rules from mask\n");
+		mlx5dr_err(dmn, "Cannot generate any valid rules from mask\n");
 		return -EINVAL;
 	}
 
 	/* Check that all mask fields were consumed */
 	for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) {
 		if (((u8 *)&mask)[i] != 0) {
-			mlx5dr_info(dmn, "Mask contains unsupported parameters\n");
+			mlx5dr_err(dmn, "Mask contains unsupported parameters\n");
 			return -EOPNOTSUPP;
 		}
 	}
@@ -563,7 +563,7 @@ static int dr_matcher_set_all_ste_builders(struct mlx5dr_matcher *matcher,
 	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV6);
 
 	if (!nic_matcher->ste_builder) {
-		mlx5dr_dbg(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
+		mlx5dr_err(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
 		return -EINVAL;
 	}
 
@@ -634,13 +634,13 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
 	int ret;
 
 	if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
-		mlx5dr_info(dmn, "Invalid match criteria attribute\n");
+		mlx5dr_err(dmn, "Invalid match criteria attribute\n");
 		return -EINVAL;
 	}
 
 	if (mask) {
 		if (mask->match_sz > sizeof(struct mlx5dr_match_param)) {
-			mlx5dr_info(dmn, "Invalid match size attribute\n");
+			mlx5dr_err(dmn, "Invalid match size attribute\n");
 			return -EINVAL;
 		}
 		mlx5dr_ste_copy_param(matcher->match_criteria,
@@ -671,7 +671,7 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
 
 struct mlx5dr_matcher *
 mlx5dr_matcher_create(struct mlx5dr_table *tbl,
-		      u16 priority,
+		      u32 priority,
 		      u8 match_criteria_enable,
 		      struct mlx5dr_match_parameters *mask)
 {
......
@@ -826,8 +826,8 @@ dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
 						  ste_location, send_ste_list);
 		if (!new_htbl) {
 			mlx5dr_htbl_put(cur_htbl);
-			mlx5dr_info(dmn, "failed creating rehash table, htbl-log_size: %d\n",
-				    cur_htbl->chunk_size);
+			mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
+				   cur_htbl->chunk_size);
 		} else {
 			cur_htbl = new_htbl;
 		}
@@ -877,7 +877,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
 	if (!value_size ||
 	    (value_size > sizeof(struct mlx5dr_match_param) ||
 	     (value_size % sizeof(u32)))) {
-		mlx5dr_dbg(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
+		mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
 		return false;
 	}
 
@@ -888,7 +888,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
 		e_idx = min(s_idx + sizeof(param->outer), value_size);
 
 		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
-			mlx5dr_dbg(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
+			mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
 			return false;
 		}
 	}
@@ -898,7 +898,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
 		e_idx = min(s_idx + sizeof(param->misc), value_size);
 
 		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
-			mlx5dr_dbg(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
+			mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
 			return false;
 		}
 	}
@@ -908,7 +908,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
 		e_idx = min(s_idx + sizeof(param->inner), value_size);
 
 		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
-			mlx5dr_dbg(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
+			mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
 			return false;
 		}
 	}
@@ -918,7 +918,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
 		e_idx = min(s_idx + sizeof(param->misc2), value_size);
 
 		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
-			mlx5dr_dbg(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
+			mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
 			return false;
 		}
 	}
@@ -928,7 +928,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
 		e_idx = min(s_idx + sizeof(param->misc3), value_size);
 
 		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
-			mlx5dr_dbg(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
+			mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
 			return false;
 		}
 	}
@@ -1221,7 +1221,7 @@ dr_rule_create_rule(struct mlx5dr_matcher *matcher,
 	dr_rule_remove_action_members(rule);
free_rule:
 	kfree(rule);
-	mlx5dr_info(dmn, "Failed creating rule\n");
+	mlx5dr_err(dmn, "Failed creating rule\n");
 	return NULL;
 }
......
@@ -136,7 +136,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
 	err = mlx5_wq_qp_create(mdev, &wqp, temp_qpc, &dr_qp->wq,
 				&dr_qp->wq_ctrl);
 	if (err) {
-		mlx5_core_info(mdev, "Can't create QP WQ\n");
+		mlx5_core_warn(mdev, "Can't create QP WQ\n");
 		goto err_wq;
 	}
 
@@ -651,8 +651,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
 
 	/* Init */
 	ret = dr_modify_qp_rst2init(dmn->mdev, dr_qp, port);
-	if (ret)
+	if (ret) {
+		mlx5dr_err(dmn, "Failed modify QP rst2init\n");
 		return ret;
+	}
 
 	/* RTR */
 	ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index, &rtr_attr.dgid_attr);
@@ -667,8 +669,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
 	rtr_attr.udp_src_port	= dmn->info.caps.roce_min_src_udp;
 
 	ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
-	if (ret)
+	if (ret) {
+		mlx5dr_err(dmn, "Failed modify QP init2rtr\n");
 		return ret;
+	}
 
 	/* RTS */
 	rts_attr.timeout	= 14;
@@ -676,8 +680,10 @@ static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
 	rts_attr.rnr_retry	= 7;
 
 	ret = dr_cmd_modify_qp_rtr2rts(dmn->mdev, dr_qp, &rts_attr);
-	if (ret)
+	if (ret) {
+		mlx5dr_err(dmn, "Failed modify QP rtr2rts\n");
 		return ret;
+	}
 
 	return 0;
 }
@@ -861,6 +867,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
 	cq_size = QUEUE_SIZE + 1;
 	dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);
 	if (!dmn->send_ring->cq) {
+		mlx5dr_err(dmn, "Failed creating CQ\n");
 		ret = -ENOMEM;
 		goto free_send_ring;
 	}
@@ -872,6 +879,7 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
 	dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
 	if (!dmn->send_ring->qp)  {
+		mlx5dr_err(dmn, "Failed creating QP\n");
 		ret = -ENOMEM;
 		goto clean_cq;
 	}
......
@@ -728,7 +728,7 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
 {
 	if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
 		if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
-			mlx5dr_dbg(dmn, "Partial mask source_port is not supported\n");
+			mlx5dr_err(dmn, "Partial mask source_port is not supported\n");
 			return -EINVAL;
 		}
 	}
......
@@ -128,16 +128,20 @@ static int dr_table_init_nic(struct mlx5dr_domain *dmn,
 						  DR_CHUNK_SIZE_1,
 						  MLX5DR_STE_LU_TYPE_DONT_CARE,
 						  0);
-	if (!nic_tbl->s_anchor)
+	if (!nic_tbl->s_anchor) {
+		mlx5dr_err(dmn, "Failed allocating htbl\n");
 		return -ENOMEM;
+	}
 
 	info.type = CONNECT_MISS;
 	info.miss_icm_addr = nic_dmn->default_icm_addr;
 	ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
 						nic_tbl->s_anchor,
 						&info, true);
-	if (ret)
+	if (ret) {
+		mlx5dr_err(dmn, "Failed int and send htbl\n");
 		goto free_s_anchor;
+	}
 
 	mlx5dr_htbl_get(nic_tbl->s_anchor);
......
@@ -705,7 +705,7 @@ struct mlx5dr_matcher {
 	struct mlx5dr_matcher_rx_tx rx;
 	struct mlx5dr_matcher_rx_tx tx;
 	struct list_head matcher_list;
-	u16 prio;
+	u32 prio;
 	struct mlx5dr_match_param mask;
 	u8 match_criteria;
 	refcount_t refcount;
......
@@ -140,7 +140,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
 					 struct mlx5_flow_group *fg)
 {
 	struct mlx5dr_matcher *matcher;
-	u16 priority = MLX5_GET(create_flow_group_in, in,
-				start_flow_index);
+	u32 priority = MLX5_GET(create_flow_group_in, in,
+				start_flow_index);
 	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
 					    in,
......
@@ -59,7 +59,7 @@ u32 mlx5dr_table_get_id(struct mlx5dr_table *table);
 
 struct mlx5dr_matcher *
 mlx5dr_matcher_create(struct mlx5dr_table *table,
-		      u16 priority,
+		      u32 priority,
 		      u8 match_criteria_enable,
 		      struct mlx5dr_match_parameters *mask);
 
@@ -151,7 +151,7 @@ mlx5dr_table_get_id(struct mlx5dr_table *table) { return 0; }
 static inline struct mlx5dr_matcher *
 mlx5dr_matcher_create(struct mlx5dr_table *table,
-		      u16 priority,
+		      u32 priority,
 		      u8 match_criteria_enable,
 		      struct mlx5dr_match_parameters *mask) { return NULL; }
......
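The u16-to-u32 widening above matters because the matcher priority is derived from ``start_flow_index``, a 32-bit field in the ``create_flow_group_in`` layout; storing it in a u16 silently truncated indices above 65535. A standalone illustration:

	u32 start_flow_index = 0x10000;  /* group starting at index 65536 */
	u16 old_prio = start_flow_index; /* truncates to 0 - wrong order  */
	u32 new_prio = start_flow_index; /* keeps 65536                   */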
@@ -84,6 +84,7 @@ enum {
 	FDB_TC_OFFLOAD,
 	FDB_FT_OFFLOAD,
 	FDB_SLOW_PATH,
+	FDB_PER_VPORT,
 };
 
 struct mlx5_pkt_reformat;
......