Commit f88d5d68 authored by David S. Miller

Merge tag 'mlx5-updates-2019-02-21' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2019-02-21

This series adds some miscellaneous updates to the mlx5 driver:

1) Eli Britstein introduces tunnel entropy control via the PCMR register
   and fixes GRE keys by controlling port tunnel entropy calculation.

2) Eran Ben Elisha provides some fixes to the recently added tx devlink
   health reporting mechanism.

3) Huy Nguyen adds support for ndo bridge_setlink to allow
   VEPA/VEB E-Switch legacy mode configurations.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b55874f1 4b89251d
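
For item 3 above, the mode is driven from userspace through the standard
PF_BRIDGE RTM_SETLINK/RTM_GETLINK bridge messages; iproute2 exposes this as
"bridge link set dev <pf> hwmode vepa" (assuming an iproute2 build with hwmode
support). A sketch of the raw netlink request appears after the en_main.c
hunks below.
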
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -30,7 +30,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
 mlx5_core-$(CONFIG_MLX5_EN_ARFS)     += en_arfs.o
 mlx5_core-$(CONFIG_MLX5_EN_RXNFC)    += en_fs_ethtool.o
 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
-mlx5_core-$(CONFIG_MLX5_ESWITCH)     += en_rep.o en_tc.o en/tc_tun.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH)     += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o
 
 #
 # Core extra

--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -113,6 +113,18 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
 	return 0;
 }
 
+static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
+				 char *err_str,
+				 struct mlx5e_tx_err_ctx *err_ctx)
+{
+	if (IS_ERR_OR_NULL(tx_reporter)) {
+		netdev_err(err_ctx->sq->channel->netdev, err_str);
+		return err_ctx->recover(err_ctx->sq);
+	}
+
+	return devlink_health_report(tx_reporter, err_str, err_ctx);
+}
+
 void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq)
 {
 	char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN];
@@ -122,7 +134,7 @@ void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq)
 	err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
 	sprintf(err_str, "ERR CQE on SQ: 0x%x", sq->sqn);
 
-	devlink_health_report(sq->channel->priv->tx_reporter, err_str,
-			      &err_ctx);
+	mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str,
+			      &err_ctx);
 }
 
@@ -136,7 +148,7 @@ static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq)
 		  eq->core.eqn, eq->core.cons_index, eq->core.irqn);
 
 	eqe_count = mlx5_eq_poll_irq_disabled(eq);
-	ret = eqe_count ? true : false;
+	ret = eqe_count ? false : true;
 	if (!eqe_count) {
 		clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 		return ret;
@@ -160,7 +172,7 @@ int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq)
 		sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
 		jiffies_to_usecs(jiffies - sq->txq->trans_start));
 
-	return devlink_health_report(sq->channel->priv->tx_reporter, err_str,
-				     &err_ctx);
+	return mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str,
+				     &err_ctx);
 }
@@ -281,11 +293,11 @@ int mlx5e_tx_reporter_create(struct mlx5e_priv *priv)
 		devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops,
 					       MLX5_REPORTER_TX_GRACEFUL_PERIOD,
 					       true, priv);
-	if (IS_ERR_OR_NULL(priv->tx_reporter))
+	if (IS_ERR(priv->tx_reporter))
 		netdev_warn(priv->netdev,
 			    "Failed to create tx reporter, err = %ld\n",
 			    PTR_ERR(priv->tx_reporter));
-	return PTR_ERR_OR_ZERO(priv->tx_reporter);
+	return IS_ERR_OR_NULL(priv->tx_reporter);
 }
 
 void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv)

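Note on the helper above: with a functional reporter, TX errors still go
through devlink_health_report() and the devlink health framework drives
logging and recovery; when reporter creation failed (IS_ERR_OR_NULL), the
driver now logs the error string to the netdev and invokes the recover
callback directly instead of silently dropping the event. That fallback is
what lets the reporter-validity checks at the call sites (mlx5e_tx_timeout()
in en_main.c and mlx5e_poll_tx_cq() in en_tx.c below) be removed.
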
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -35,6 +35,7 @@
 #include <linux/mlx5/fs.h>
 #include <net/vxlan.h>
 #include <linux/bpf.h>
+#include <linux/if_bridge.h>
 #include <net/page_pool.h>
 #include "eswitch.h"
 #include "en.h"
@@ -4173,12 +4174,6 @@ static void mlx5e_tx_timeout(struct net_device *dev)
 	struct mlx5e_priv *priv = netdev_priv(dev);
 
 	netdev_err(dev, "TX timeout detected\n");
-
-	if (IS_ERR_OR_NULL(priv->tx_reporter)) {
-		netdev_err_once(priv->netdev, "tx timeout will not be handled, no valid tx reporter\n");
-		return;
-	}
-
 	queue_work(priv->wq, &priv->tx_timeout_work);
 }
 
@@ -4311,6 +4306,61 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	}
 }
 
+#ifdef CONFIG_MLX5_ESWITCH
+static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				struct net_device *dev, u32 filter_mask,
+				int nlflags)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u8 mode, setting;
+	int err;
+
+	err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
+	if (err)
+		return err;
+	mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
+				       mode,
+				       0, 0, nlflags, filter_mask, NULL);
+}
+
+static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+				u16 flags, struct netlink_ext_ack *extack)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct nlattr *attr, *br_spec;
+	u16 mode = BRIDGE_MODE_UNDEF;
+	u8 setting;
+	int rem;
+
+	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+	if (!br_spec)
+		return -EINVAL;
+
+	nla_for_each_nested(attr, br_spec, rem) {
+		if (nla_type(attr) != IFLA_BRIDGE_MODE)
+			continue;
+
+		if (nla_len(attr) < sizeof(mode))
+			return -EINVAL;
+
+		mode = nla_get_u16(attr);
+		if (mode > BRIDGE_MODE_VEPA)
+			return -EINVAL;
+
+		break;
+	}
+
+	if (mode == BRIDGE_MODE_UNDEF)
+		return -EINVAL;
+
+	setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0;
+	return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);
+}
+#endif
+
 const struct net_device_ops mlx5e_netdev_ops = {
 	.ndo_open                = mlx5e_open,
 	.ndo_stop                = mlx5e_close,
@@ -4337,6 +4387,9 @@ const struct net_device_ops mlx5e_netdev_ops = {
 	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
 #endif
 #ifdef CONFIG_MLX5_ESWITCH
+	.ndo_bridge_setlink      = mlx5e_bridge_setlink,
+	.ndo_bridge_getlink      = mlx5e_bridge_getlink,
+
 	/* SRIOV E-Switch NDOs */
 	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
 	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,

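For illustration, a hypothetical, minimal userspace sketch (not part of this
patchset) of the PF_BRIDGE RTM_SETLINK request that mlx5e_bridge_setlink()
above parses: an IFLA_AF_SPEC attribute nesting a u16 IFLA_BRIDGE_MODE.
The interface name and buffer sizes are illustrative; error handling is
trimmed to the bare minimum.

/* request VEPA mode on one PF uplink netdev, e.g. "eth2" (hypothetical name) */
#include <linux/if_bridge.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int request_vepa(const char *ifname)
{
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr  nlh;
		struct ifinfomsg ifi;
		char             buf[64];
	} req;
	struct rtattr *af_spec, *mode;
	__u16 bmode = BRIDGE_MODE_VEPA;
	int fd, ret;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(req.ifi));
	req.nlh.nlmsg_type  = RTM_SETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.ifi.ifi_family  = PF_BRIDGE;	/* bridge setlink path */
	req.ifi.ifi_index   = if_nametoindex(ifname);

	/* IFLA_AF_SPEC { IFLA_BRIDGE_MODE: u16 } */
	af_spec = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	af_spec->rta_type = IFLA_AF_SPEC;

	mode = (struct rtattr *)((char *)af_spec + RTA_ALIGN(RTA_LENGTH(0)));
	mode->rta_type = IFLA_BRIDGE_MODE;
	mode->rta_len  = RTA_LENGTH(sizeof(bmode));
	memcpy(RTA_DATA(mode), &bmode, sizeof(bmode));

	af_spec->rta_len  = RTA_ALIGN(RTA_LENGTH(0)) + RTA_ALIGN(mode->rta_len);
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + af_spec->rta_len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;
	ret = sendto(fd, &req, req.nlh.nlmsg_len, 0,
		     (struct sockaddr *)&sa, sizeof(sa));
	close(fd);
	return ret < 0 ? -1 : 0;	/* a real tool would also read the ACK */
}

The handler above rejects any mode other than VEB or VEPA with -EINVAL, and
mlx5e_bridge_getlink() reports the current mode back through
ndo_dflt_bridge_getlink().
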
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -44,6 +44,7 @@
 #include "en_tc.h"
 #include "en/tc_tun.h"
 #include "fs_core.h"
+#include "lib/port_tun.h"
 
 #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
 	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
@@ -1044,14 +1045,23 @@ static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
 int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
 				 struct mlx5e_encap_entry *e)
 {
+	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
 	struct mlx5e_neigh_hash_entry *nhe;
 	int err;
 
+	err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
+	if (err)
+		return err;
+
 	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
 	if (!nhe) {
 		err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
-		if (err)
+		if (err) {
+			mlx5_tun_entropy_refcount_dec(tun_entropy,
+						      e->reformat_type);
 			return err;
+		}
 	}
 	list_add(&e->encap_list, &nhe->encap_list);
 	return 0;
@@ -1060,6 +1070,9 @@ int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
 void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
 				  struct mlx5e_encap_entry *e)
 {
+	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
+	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
 	struct mlx5e_neigh_hash_entry *nhe;
 
 	list_del(&e->encap_list);
@@ -1067,6 +1080,7 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
 
 	if (list_empty(&nhe->encap_list))
 		mlx5e_rep_neigh_entry_destroy(priv, nhe);
+	mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
 }
 
 static int mlx5e_vf_rep_open(struct net_device *dev)
@@ -1564,6 +1578,8 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
 		if (err)
 			goto destroy_tises;
 
+		mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);
+
 		/* init indirect block notifications */
 		INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
 		uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;

--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -37,6 +37,7 @@
 #include <linux/rhashtable.h>
 #include "eswitch.h"
 #include "en.h"
+#include "lib/port_tun.h"
 
 #ifdef CONFIG_MLX5_ESWITCH
 struct mlx5e_neigh_update_table {
@@ -71,6 +72,8 @@ struct mlx5_rep_uplink_priv {
 	 */
 	struct list_head tc_indr_block_priv_list;
 	struct notifier_block netdevice_nb;
+
+	struct mlx5_tun_entropy tun_entropy;
 };
 
 struct mlx5e_rep_priv {

--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -508,9 +508,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 					      &sq->state)) {
 				mlx5e_dump_error_cqe(sq,
 						     (struct mlx5_err_cqe *)cqe);
-				if (!IS_ERR_OR_NULL(cq->channel->priv->tx_reporter))
-					queue_work(cq->channel->priv->wq,
-						   &sq->recover_work);
+				queue_work(cq->channel->priv->wq,
+					   &sq->recover_work);
 			}
 			stats->cqe_err++;
 		}

--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -64,6 +64,9 @@ enum {
 	PROMISC_CHANGE = BIT(3),
 };
 
+static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
+static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
+
 /* Vport context events */
 #define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
 			    MC_ADDR_CHANGE | \
@@ -268,6 +271,37 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
 	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
 }
 
+enum {
+	LEGACY_VEPA_PRIO = 0,
+	LEGACY_FDB_PRIO,
+};
+
+static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
+{
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_flow_namespace *root_ns;
+	struct mlx5_flow_table *fdb;
+	int err;
+
+	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
+	if (!root_ns) {
+		esw_warn(dev, "Failed to get FDB flow namespace\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* num FTE 2, num FG 2 */
+	fdb = mlx5_create_auto_grouped_flow_table(root_ns, LEGACY_VEPA_PRIO,
+						  2, 2, 0, 0);
+	if (IS_ERR(fdb)) {
+		err = PTR_ERR(fdb);
+		esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
+		return err;
+	}
+	esw->fdb_table.legacy.vepa_fdb = fdb;
+
+	return 0;
+}
+
 static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -296,8 +330,8 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
 		return -ENOMEM;
 
 	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
-
 	ft_attr.max_fte = table_size;
+	ft_attr.prio = LEGACY_FDB_PRIO;
 	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
 	if (IS_ERR(fdb)) {
 		err = PTR_ERR(fdb);
@@ -356,41 +390,65 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
 	esw->fdb_table.legacy.promisc_grp = g;
 
 out:
-	if (err) {
-		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
-			mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
-			esw->fdb_table.legacy.allmulti_grp = NULL;
-		}
-		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
-			mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
-			esw->fdb_table.legacy.addr_grp = NULL;
-		}
-		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.fdb)) {
-			mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
-			esw->fdb_table.legacy.fdb = NULL;
-		}
-	}
+	if (err)
+		esw_destroy_legacy_fdb_table(esw);
 
 	kvfree(flow_group_in);
 	return err;
 }
 
+static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
+{
+	esw_debug(esw->dev, "Destroy VEPA Table\n");
+	if (!esw->fdb_table.legacy.vepa_fdb)
+		return;
+
+	mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
+	esw->fdb_table.legacy.vepa_fdb = NULL;
+}
+
 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
 {
+	esw_debug(esw->dev, "Destroy FDB Table\n");
 	if (!esw->fdb_table.legacy.fdb)
 		return;
 
-	esw_debug(esw->dev, "Destroy FDB Table\n");
-	mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
-	mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
-	mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
+	if (esw->fdb_table.legacy.promisc_grp)
+		mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
+	if (esw->fdb_table.legacy.allmulti_grp)
+		mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
+	if (esw->fdb_table.legacy.addr_grp)
+		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
 	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);
+
 	esw->fdb_table.legacy.fdb = NULL;
 	esw->fdb_table.legacy.addr_grp = NULL;
 	esw->fdb_table.legacy.allmulti_grp = NULL;
 	esw->fdb_table.legacy.promisc_grp = NULL;
 }
 
+static int esw_create_legacy_table(struct mlx5_eswitch *esw)
+{
+	int err;
+
+	err = esw_create_legacy_vepa_table(esw);
+	if (err)
+		return err;
+
+	err = esw_create_legacy_fdb_table(esw);
+	if (err)
+		esw_destroy_legacy_vepa_table(esw);
+
+	return err;
+}
+
+static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
+{
+	esw_cleanup_vepa_rules(esw);
+	esw_destroy_legacy_fdb_table(esw);
+	esw_destroy_legacy_vepa_table(esw);
+}
+
 /* E-Switch vport UC/MC lists management */
 typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
 				 struct vport_addr *vaddr);
@@ -1677,7 +1735,9 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 	mlx5_lag_update(esw->dev);
 
 	if (mode == SRIOV_LEGACY) {
-		err = esw_create_legacy_fdb_table(esw);
+		err = esw_create_legacy_table(esw);
+		if (err)
+			goto abort;
 	} else {
 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
@@ -1758,7 +1818,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
 	esw_destroy_tsar(esw);
 
 	if (esw->mode == SRIOV_LEGACY)
-		esw_destroy_legacy_fdb_table(esw);
+		esw_destroy_legacy_table(esw);
 	else if (esw->mode == SRIOV_OFFLOADS)
 		esw_offloads_cleanup(esw);
 
@@ -2041,6 +2101,127 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
 	return err;
 }
 
+static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
+{
+	if (esw->fdb_table.legacy.vepa_uplink_rule)
+		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);
+
+	if (esw->fdb_table.legacy.vepa_star_rule)
+		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);
+
+	esw->fdb_table.legacy.vepa_uplink_rule = NULL;
+	esw->fdb_table.legacy.vepa_star_rule = NULL;
+}
+
+static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
+					 u8 setting)
+{
+	struct mlx5_flow_destination dest = {};
+	struct mlx5_flow_act flow_act = {};
+	struct mlx5_flow_handle *flow_rule;
+	struct mlx5_flow_spec *spec;
+	int err = 0;
+	void *misc;
+
+	if (!setting) {
+		esw_cleanup_vepa_rules(esw);
+		return 0;
+	}
+
+	if (esw->fdb_table.legacy.vepa_uplink_rule)
+		return 0;
+
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+	if (!spec)
+		return -ENOMEM;
+
+	/* Uplink rule forward uplink traffic to FDB */
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest.ft = esw->fdb_table.legacy.fdb;
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
+					&flow_act, &dest, 1);
+	if (IS_ERR(flow_rule)) {
+		err = PTR_ERR(flow_rule);
+		goto out;
+	} else {
+		esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
+	}
+
+	/* Star rule to forward all traffic to uplink vport */
+	memset(spec, 0, sizeof(*spec));
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+	dest.vport.num = MLX5_VPORT_UPLINK;
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
+					&flow_act, &dest, 1);
+	if (IS_ERR(flow_rule)) {
+		err = PTR_ERR(flow_rule);
+		goto out;
+	} else {
+		esw->fdb_table.legacy.vepa_star_rule = flow_rule;
+	}
+
+out:
+	kvfree(spec);
+	if (err)
+		esw_cleanup_vepa_rules(esw);
+	return err;
+}
+
+int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
+{
+	int err = 0;
+
+	if (!esw)
+		return -EOPNOTSUPP;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+
+	mutex_lock(&esw->state_lock);
+	if (esw->mode != SRIOV_LEGACY) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	err = _mlx5_eswitch_set_vepa_locked(esw, setting);
+
+out:
+	mutex_unlock(&esw->state_lock);
+	return err;
+}
+
+int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
+{
+	int err = 0;
+
+	if (!esw)
+		return -EOPNOTSUPP;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+
+	mutex_lock(&esw->state_lock);
+	if (esw->mode != SRIOV_LEGACY) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
+
+out:
+	mutex_unlock(&esw->state_lock);
+	return err;
+}
+
 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
 				 int vport, bool setting)
 {

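The two rules installed by _mlx5_eswitch_set_vepa_locked() above are the
whole VEPA datapath: the VEPA table is created at LEGACY_VEPA_PRIO, ahead of
the regular legacy FDB (now explicitly placed at LEGACY_FDB_PRIO), so it
classifies every packet first. Traffic whose source_port matches
MLX5_VPORT_UPLINK is forwarded into the legacy FDB and switched as usual,
while the catch-all "star" rule forwards everything else, i.e. traffic
sourced by VFs, straight to the uplink vport. VF-to-VF traffic therefore
hairpins through the adjacent external bridge, which is exactly VEPA
behavior; clearing the setting deletes both rules and restores internal VEB
switching. mlx5_eswitch_get_vepa() simply reports whether the uplink rule is
installed.
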
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -137,6 +137,9 @@ struct mlx5_eswitch_fdb {
 			struct mlx5_flow_group *addr_grp;
 			struct mlx5_flow_group *allmulti_grp;
 			struct mlx5_flow_group *promisc_grp;
+			struct mlx5_flow_table *vepa_fdb;
+			struct mlx5_flow_handle *vepa_uplink_rule;
+			struct mlx5_flow_handle *vepa_star_rule;
 		} legacy;
 
 		struct offloads_fdb {
@@ -242,6 +245,8 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
 				 int vport_num, bool setting);
 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
 				u32 max_rate, u32 min_rate);
+int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
+int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 				  int vport, struct ifla_vf_info *ivi);
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,

--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "lib/port_tun.h"

struct mlx5_port_tun_entropy_flags {
	bool force_supported, force_enabled;
	bool calc_supported, calc_enabled;
	bool gre_calc_supported, gre_calc_enabled;
};

static void mlx5_query_port_tun_entropy(struct mlx5_core_dev *mdev,
					struct mlx5_port_tun_entropy_flags *entropy_flags)
{
	u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
	/* Default values for FW which do not support MLX5_REG_PCMR */
	entropy_flags->force_supported = false;
	entropy_flags->calc_supported = false;
	entropy_flags->gre_calc_supported = false;
	entropy_flags->force_enabled = false;
	entropy_flags->calc_enabled = true;
	entropy_flags->gre_calc_enabled = true;

	if (!MLX5_CAP_GEN(mdev, ports_check))
		return;

	if (mlx5_query_ports_check(mdev, out, sizeof(out)))
		return;

	entropy_flags->force_supported = !!(MLX5_GET(pcmr_reg, out, entropy_force_cap));
	entropy_flags->calc_supported = !!(MLX5_GET(pcmr_reg, out, entropy_calc_cap));
	entropy_flags->gre_calc_supported = !!(MLX5_GET(pcmr_reg, out, entropy_gre_calc_cap));
	entropy_flags->force_enabled = !!(MLX5_GET(pcmr_reg, out, entropy_force));
	entropy_flags->calc_enabled = !!(MLX5_GET(pcmr_reg, out, entropy_calc));
	entropy_flags->gre_calc_enabled = !!(MLX5_GET(pcmr_reg, out, entropy_gre_calc));
}

static int mlx5_set_port_tun_entropy_calc(struct mlx5_core_dev *mdev, u8 enable,
					  u8 force)
{
	u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
	int err;

	err = mlx5_query_ports_check(mdev, in, sizeof(in));
	if (err)
		return err;
	MLX5_SET(pcmr_reg, in, local_port, 1);
	MLX5_SET(pcmr_reg, in, entropy_force, force);
	MLX5_SET(pcmr_reg, in, entropy_calc, enable);
	return mlx5_set_ports_check(mdev, in, sizeof(in));
}

static int mlx5_set_port_gre_tun_entropy_calc(struct mlx5_core_dev *mdev,
					      u8 enable, u8 force)
{
	u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
	int err;

	err = mlx5_query_ports_check(mdev, in, sizeof(in));
	if (err)
		return err;
	MLX5_SET(pcmr_reg, in, local_port, 1);
	MLX5_SET(pcmr_reg, in, entropy_force, force);
	MLX5_SET(pcmr_reg, in, entropy_gre_calc, enable);
	return mlx5_set_ports_check(mdev, in, sizeof(in));
}

void mlx5_init_port_tun_entropy(struct mlx5_tun_entropy *tun_entropy,
				struct mlx5_core_dev *mdev)
{
	struct mlx5_port_tun_entropy_flags entropy_flags;

	tun_entropy->mdev = mdev;
	mutex_init(&tun_entropy->lock);
	mlx5_query_port_tun_entropy(mdev, &entropy_flags);
	tun_entropy->num_enabling_entries = 0;
	tun_entropy->num_disabling_entries = 0;
	tun_entropy->enabled = entropy_flags.calc_supported ?
			       entropy_flags.calc_enabled : true;
}

static int mlx5_set_entropy(struct mlx5_tun_entropy *tun_entropy,
			    int reformat_type, bool enable)
{
	struct mlx5_port_tun_entropy_flags entropy_flags;
	int err;

	mlx5_query_port_tun_entropy(tun_entropy->mdev, &entropy_flags);
	/* Tunnel entropy calculation may be controlled either on port basis
	 * for all tunneling protocols or specifically for GRE protocol.
	 * Prioritize GRE protocol control (if capable) over global port
	 * configuration.
	 */
	if (entropy_flags.gre_calc_supported &&
	    reformat_type == MLX5_REFORMAT_TYPE_L2_TO_NVGRE) {
		/* Other applications may change the global FW entropy
		 * calculation settings. Check that the current entropy value
		 * is the opposite of the one being set.
		 */
		if (entropy_flags.force_enabled &&
		    enable == entropy_flags.gre_calc_enabled) {
			mlx5_core_warn(tun_entropy->mdev,
				       "Unexpected GRE entropy calc setting - expected %d",
				       !entropy_flags.gre_calc_enabled);
			return -EOPNOTSUPP;
		}
		err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev, enable,
							 entropy_flags.force_supported);
		if (err)
			return err;
		/* if we turn on the entropy we don't need to force it anymore */
		if (entropy_flags.force_supported && enable) {
			err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev, 1, 0);
			if (err)
				return err;
		}
	} else if (entropy_flags.calc_supported) {
		/* Other applications may change the global FW entropy
		 * calculation settings. Check that the current entropy value
		 * is the opposite of the one being set.
		 */
		if (entropy_flags.force_enabled &&
		    enable == entropy_flags.calc_enabled) {
			mlx5_core_warn(tun_entropy->mdev,
				       "Unexpected entropy calc setting - expected %d",
				       !entropy_flags.calc_enabled);
			return -EOPNOTSUPP;
		}
		/* GRE requires disabling entropy calculation. if there are
		 * enabling entries (i.e. VXLAN) we cannot turn it off for them,
		 * thus fail.
		 */
		if (tun_entropy->num_enabling_entries)
			return -EOPNOTSUPP;
		err = mlx5_set_port_tun_entropy_calc(tun_entropy->mdev, enable,
						     entropy_flags.force_supported);
		if (err)
			return err;
		tun_entropy->enabled = enable;
		/* if we turn on the entropy we don't need to force it anymore */
		if (entropy_flags.force_supported && enable) {
			err = mlx5_set_port_tun_entropy_calc(tun_entropy->mdev, 1, 0);
			if (err)
				return err;
		}
	}

	return 0;
}

/* The function manages the refcount for enabling/disabling tunnel types.
 * The return value indicates whether the inc is successful or not, depending
 * on entropy capabilities and configuration.
 */
int mlx5_tun_entropy_refcount_inc(struct mlx5_tun_entropy *tun_entropy,
				  int reformat_type)
{
	/* the default is error for unknown (non VXLAN/GRE) tunnel types */
	int err = -EOPNOTSUPP;

	mutex_lock(&tun_entropy->lock);
	if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_VXLAN &&
	    tun_entropy->enabled) {
		/* in case entropy calculation is enabled for all tunneling
		 * types, it is ok for VXLAN, so approve.
		 * otherwise keep the error default.
		 */
		tun_entropy->num_enabling_entries++;
		err = 0;
	} else if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_NVGRE) {
		/* turn off the entropy only for the first GRE rule.
		 * for the next rules the entropy was already disabled
		 * successfully.
		 */
		if (tun_entropy->num_disabling_entries == 0)
			err = mlx5_set_entropy(tun_entropy, reformat_type, 0);
		else
			err = 0;
		if (!err)
			tun_entropy->num_disabling_entries++;
	}
	mutex_unlock(&tun_entropy->lock);

	return err;
}

void mlx5_tun_entropy_refcount_dec(struct mlx5_tun_entropy *tun_entropy,
				   int reformat_type)
{
	mutex_lock(&tun_entropy->lock);
	if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_VXLAN)
		tun_entropy->num_enabling_entries--;
	else if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_NVGRE &&
		 --tun_entropy->num_disabling_entries == 0)
		mlx5_set_entropy(tun_entropy, reformat_type, 1);
	mutex_unlock(&tun_entropy->lock);
}

--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.h
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019 Mellanox Technologies. */

#ifndef __MLX5_PORT_TUN_H__
#define __MLX5_PORT_TUN_H__

#include <linux/mlx5/driver.h>

struct mlx5_tun_entropy {
	struct mlx5_core_dev *mdev;
	u32 num_enabling_entries;
	u32 num_disabling_entries;
	u8  enabled;
	struct mutex lock; /* lock the entropy fields */
};

void mlx5_init_port_tun_entropy(struct mlx5_tun_entropy *tun_entropy,
				struct mlx5_core_dev *mdev);
int mlx5_tun_entropy_refcount_inc(struct mlx5_tun_entropy *tun_entropy,
				  int reformat_type);
void mlx5_tun_entropy_refcount_dec(struct mlx5_tun_entropy *tun_entropy,
				   int reformat_type);

#endif /* __MLX5_PORT_TUN_H__ */
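
A minimal usage sketch of this API, mirroring how en_rep.c above consumes it
(the uplink_priv, priv and e names are borrowed from that file; error paths
are shortened):

	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
	int err;

	/* once, when the uplink representor is set up */
	mlx5_init_port_tun_entropy(tun_entropy, priv->mdev);

	/* before offloading an encap entry: take a reference; this may
	 * turn port entropy calculation off for the first GRE entry and
	 * fails if VXLAN entries still require entropy to stay on
	 */
	err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
	if (err)
		return err;

	/* when the encap entry is removed: drop the reference; entropy
	 * calculation is re-enabled once the last GRE entry is gone
	 */
	mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
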
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -764,8 +764,7 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
 
-static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
-				  int outlen)
+int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen)
 {
 	u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
 
@@ -774,7 +773,7 @@ static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
 			    outlen, MLX5_REG_PCMR, 0, 0);
 }
 
-static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
+int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
 {
 	u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
 
@@ -785,7 +784,11 @@ static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
 int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable)
 {
 	u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
+	int err;
 
+	err = mlx5_query_ports_check(mdev, in, sizeof(in));
+	if (err)
+		return err;
 	MLX5_SET(pcmr_reg, in, local_port, 1);
 	MLX5_SET(pcmr_reg, in, fcs_chk, enable);
 	return mlx5_set_ports_check(mdev, in, sizeof(in));

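Worth noting: mlx5_set_port_fcs() becomes a read-modify-write. PCMR is
queried before the set so that toggling fcs_chk no longer clears unrelated
fields of the register, in particular the new entropy_force, entropy_calc
and entropy_gre_calc bits. The entropy setters in lib/port_tun.c above use
the same query-then-set pattern for the same reason, which is also why
mlx5_query_ports_check() and mlx5_set_ports_check() lose their static
qualifier and are declared in include/linux/mlx5/port.h below.
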
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -8473,9 +8473,17 @@ struct mlx5_ifc_pamp_reg_bits {
 struct mlx5_ifc_pcmr_reg_bits {
 	u8         reserved_at_0[0x8];
 	u8         local_port[0x8];
-	u8         reserved_at_10[0x2e];
+	u8         reserved_at_10[0x10];
+	u8         entropy_force_cap[0x1];
+	u8         entropy_calc_cap[0x1];
+	u8         entropy_gre_calc_cap[0x1];
+	u8         reserved_at_23[0x1b];
 	u8         fcs_cap[0x1];
-	u8         reserved_at_3f[0x1f];
+	u8         reserved_at_3f[0x1];
+	u8         entropy_force[0x1];
+	u8         entropy_calc[0x1];
+	u8         entropy_gre_calc[0x1];
+	u8         reserved_at_43[0x1b];
 	u8         fcs_chk[0x1];
 	u8         reserved_at_5f[0x1];
 };

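As a quick consistency check on the reworked pcmr_reg layout: the first
reserved run now stops at bit 0x1f, placing entropy_force_cap,
entropy_calc_cap and entropy_gre_calc_cap at bits 0x20-0x22; the following
0x1b reserved bits span 0x23-0x3d, so fcs_cap stays at bit 0x3e exactly where
the old reserved_at_10[0x2e] layout put it. Symmetrically, entropy_force,
entropy_calc and entropy_gre_calc take bits 0x40-0x42, reserved_at_43[0x1b]
spans 0x43-0x5d, and fcs_chk remains at bit 0x5e. Existing PCMR users are
therefore layout-compatible.
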
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -182,6 +182,8 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
 int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
 int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
 
+int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
+int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
 int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
 void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
 			 bool *enabled);