Commit 9d0279d0 authored by David S. Miller

Merge tag 'mlx5-fixes-2021-07-27' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-07-27

This series introduces some fixes to the mlx5 driver.
Please pull and let me know if there is any problem.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8373cd38 740452e0
@@ -500,10 +500,7 @@ static int next_phys_dev(struct device *dev, const void *data)
 	return 1;
 }
 
-/* This function is called with two flows:
- * 1. During initialization of mlx5_core_dev and we don't need to lock it.
- * 2. During LAG configure stage and caller holds &mlx5_intf_mutex.
- */
+/* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
 {
 	struct auxiliary_device *adev;
...
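Note: the rewritten comment tightens the locking contract; the init-time lock-free path is gone and intf_mutex is now required unconditionally. As a purely hypothetical illustration (not part of this series), such a contract is usually made machine-checkable with lockdep:

    /* Hypothetical sketch, not in the patch: on CONFIG_PROVE_LOCKING
     * kernels, lockdep_assert_held() would flag any caller that enters
     * without the mutex named by the new comment. */
    struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
    {
            lockdep_assert_held(&mlx5_intf_mutex);
            /* ... existing body unchanged ... */
    }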
@@ -471,6 +471,15 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
 	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
 }
 
+static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+{
+	bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
+		MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+
+	return ro && params->lro_en ?
+		MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
+}
+
 int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
 			 struct mlx5e_params *params,
 			 struct mlx5e_xsk_param *xsk,
@@ -508,7 +517,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
 	}
 
 	MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
-	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+	MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
 	MLX5_SET(wq, wq, log_wq_stride,
 		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
 	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
...
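Note: as I read this fix, hardware end-padding of a cache-line-unaligned packet end can be written out of order when PCIe relaxed ordering is enabled, and under LRO that padding may overwrite the start of the next packet aggregated into the same session; the new helper therefore selects MLX5_WQ_END_PAD_MODE_NONE only for that combination. A runnable userspace sketch of the same decision table, where the three booleans stand in for pcie_relaxed_ordering_enabled(), the relaxed_ordering_write capability, and params->lro_en:

    #include <stdbool.h>
    #include <stdio.h>

    enum end_pad_mode { END_PAD_MODE_ALIGN, END_PAD_MODE_NONE };

    /* mirrors rq_end_pad_mode() above: pad to alignment unless relaxed
     * ordering and LRO are both active */
    static enum end_pad_mode end_pad_mode(bool pcie_ro, bool cap_ro_write, bool lro_en)
    {
            bool ro = pcie_ro && cap_ro_write;

            return ro && lro_en ? END_PAD_MODE_NONE : END_PAD_MODE_ALIGN;
    }

    int main(void)
    {
            for (int ro = 0; ro <= 1; ro++)
                    for (int cap = 0; cap <= 1; cap++)
                            for (int lro = 0; lro <= 1; lro++)
                                    printf("ro=%d cap=%d lro=%d -> %s\n", ro, cap, lro,
                                           end_pad_mode(ro, cap, lro) == END_PAD_MODE_NONE ?
                                           "NONE" : "ALIGN");
            return 0;
    }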
@@ -482,8 +482,11 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
 		params->log_sq_size = orig->log_sq_size;
 		mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
 	}
 
-	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
+	/* RQ */
+	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
+		params->vlan_strip_disable = orig->vlan_strip_disable;
 		mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
+	}
 }
 
 static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
@@ -494,7 +497,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
 	int err;
 
 	rq->wq_type = params->rq_wq_type;
-	rq->pdev = mdev->device;
+	rq->pdev = c->pdev;
 	rq->netdev = priv->netdev;
 	rq->priv = priv;
 	rq->clock = &mdev->clock;
...
@@ -37,7 +37,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params
 	struct mlx5e_priv *priv = t->priv;
 
 	rq->wq_type = params->rq_wq_type;
-	rq->pdev = mdev->device;
+	rq->pdev = t->pdev;
 	rq->netdev = priv->netdev;
 	rq->priv = priv;
 	rq->clock = &mdev->clock;
...
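Note: both RQ-init fixes (PTP above, trap here) stop using mdev->device, which for a subfunction (SF) is the auxiliary device and not a valid DMA-mapping target, and instead reuse the DMA device already cached on the containing object. A sketch of the indirection, assuming the mlx5_core_dma_dev() helper as it exists in this kernel:

    /* Assumption: mlx5 resolves the DMA-capable device once via this
     * helper (include/linux/mlx5/driver.h); for an SF, dev->pdev is the
     * parent PCI function, while dev->device is the auxiliary device. */
    static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev)
    {
            return &dev->pdev->dev; /* valid target for page-pool DMA mapping */
    }
    /* Channels and traps cache this device (c->pdev / t->pdev), so
     * pointing rq->pdev at them works for both PF and SF netdevs. */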
@@ -3384,7 +3384,7 @@ static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool en
 
 static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
 {
-	int err = 0;
+	int err;
 	int i;
 
 	for (i = 0; i < chs->num; i++) {
@@ -3392,6 +3392,8 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
 		if (err)
 			return err;
 	}
 
+	if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state))
+		return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);
+
 	return 0;
 }
@@ -3829,6 +3831,24 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
 	return 0;
 }
 
+static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev,
+						       netdev_features_t features)
+{
+	features &= ~NETIF_F_HW_TLS_RX;
+	if (netdev->features & NETIF_F_HW_TLS_RX)
+		netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
+
+	features &= ~NETIF_F_HW_TLS_TX;
+	if (netdev->features & NETIF_F_HW_TLS_TX)
+		netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
+
+	features &= ~NETIF_F_NTUPLE;
+	if (netdev->features & NETIF_F_NTUPLE)
+		netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
+
+	return features;
+}
+
 static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
 					    netdev_features_t features)
 {
@@ -3860,15 +3880,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
 		netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
 	}
 
-	if (mlx5e_is_uplink_rep(priv)) {
-		features &= ~NETIF_F_HW_TLS_RX;
-		if (netdev->features & NETIF_F_HW_TLS_RX)
-			netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
-
-		features &= ~NETIF_F_HW_TLS_TX;
-		if (netdev->features & NETIF_F_HW_TLS_TX)
-			netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
-	}
+	if (mlx5e_is_uplink_rep(priv))
+		features = mlx5e_fix_uplink_rep_features(netdev, features);
 
 	mutex_unlock(&priv->state_lock);
@@ -4859,6 +4872,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	if (MLX5_CAP_ETH(mdev, scatter_fcs))
 		netdev->hw_features |= NETIF_F_RXFCS;
 
+	if (mlx5_qos_is_supported(mdev))
+		netdev->hw_features |= NETIF_F_HW_TC;
+
 	netdev->features = netdev->hw_features;
 
 	/* Defaults */
@@ -4879,8 +4895,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 		netdev->hw_features |= NETIF_F_NTUPLE;
 #endif
 	}
-	if (mlx5_qos_is_supported(mdev))
-		netdev->features |= NETIF_F_HW_TC;
 
 	netdev->features |= NETIF_F_HIGHDMA;
 	netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
...
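Note: two independent changes meet here. Moving NETIF_F_HW_TC into hw_features (instead of setting it only in features) makes the flag user-toggleable via ethtool, and the uplink-representor fixup, now factored into its own helper, additionally clears NETIF_F_NTUPLE. A runnable userspace sketch of the clear-and-warn pattern, with stand-in bits for the NETIF_F_* flags; note it warns based on the currently active set while masking the requested set, as the kernel code does:

    #include <stdint.h>
    #include <stdio.h>

    #define F_TLS_RX (1u << 0)
    #define F_TLS_TX (1u << 1)
    #define F_NTUPLE (1u << 2)

    /* clear unsupported bits from 'wanted'; warn only when the bit is
     * currently enabled in 'cur', so the user sees why it went away */
    static uint32_t fix_uplink_rep_features(uint32_t cur, uint32_t wanted)
    {
            wanted &= ~F_TLS_RX;
            if (cur & F_TLS_RX)
                    fprintf(stderr, "Disabling tls_rx, not supported in switchdev mode\n");

            wanted &= ~F_TLS_TX;
            if (cur & F_TLS_TX)
                    fprintf(stderr, "Disabling tls_tx, not supported in switchdev mode\n");

            wanted &= ~F_NTUPLE;
            if (cur & F_NTUPLE)
                    fprintf(stderr, "Disabling ntuple, not supported in switchdev mode\n");

            return wanted;
    }

    int main(void)
    {
            uint32_t cur = F_TLS_RX | F_NTUPLE;

            printf("fixed features = 0x%x\n", fix_uplink_rep_features(cur, cur));
            return 0;
    }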
@@ -452,12 +452,32 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
 static
 struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
 {
+	struct mlx5_core_dev *mdev;
 	struct net_device *netdev;
 	struct mlx5e_priv *priv;
 
-	netdev = __dev_get_by_index(net, ifindex);
+	netdev = dev_get_by_index(net, ifindex);
+	if (!netdev)
+		return ERR_PTR(-ENODEV);
+
 	priv = netdev_priv(netdev);
-	return priv->mdev;
+	mdev = priv->mdev;
+	dev_put(netdev);
+
+	/* Mirred tc action holds a refcount on the ifindex net_device (see
+	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
+	 * after dev_put(netdev), while we're in the context of adding a tc flow.
+	 *
+	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
+	 * stored in a hairpin object, which exists until all flows, that refer to it, get
+	 * removed.
+	 *
+	 * On the other hand, after a hairpin object has been created, the peer net_device may
+	 * be removed/unbound while there are still some hairpin flows that are using it. This
+	 * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
+	 * NETDEV_UNREGISTER event of the peer net_device.
	 */
+	return mdev;
 }
 
 static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
@@ -666,6 +686,10 @@ mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params
 	func_mdev = priv->mdev;
 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+	if (IS_ERR(peer_mdev)) {
+		err = PTR_ERR(peer_mdev);
+		goto create_pair_err;
+	}
 
 	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
 	if (IS_ERR(pair)) {
@@ -804,6 +828,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
 	int err;
 
 	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
+	if (IS_ERR(peer_mdev)) {
+		NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
+		return PTR_ERR(peer_mdev);
+	}
+
 	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
 		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
 		return -EOPNOTSUPP;
...
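Note: the nullptr fix works because mlx5e_hairpin_get_mdev() now returns either a valid pointer or an encoded errno, which both callers decode. A runnable userspace sketch of the ERR_PTR/IS_ERR convention it relies on, simplified from include/linux/err.h:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            /* the top 4095 addresses are never valid pointers */
            return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    /* stand-in for a lookup that may fail, like mlx5e_hairpin_get_mdev() */
    static void *lookup(int ok)
    {
            static int object = 42;

            return ok ? (void *)&object : ERR_PTR(-ENODEV);
    }

    int main(void)
    {
            void *p = lookup(0);

            if (IS_ERR(p))
                    printf("lookup failed: %ld\n", PTR_ERR(p)); /* -19 == -ENODEV */
            else
                    printf("got %d\n", *(int *)p);
            return 0;
    }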
@@ -636,7 +636,7 @@ struct esw_vport_tbl_namespace {
 };
 
 struct mlx5_vport_tbl_attr {
-	u16 chain;
+	u32 chain;
 	u16 prio;
 	u16 vport;
 	const struct esw_vport_tbl_namespace *vport_ns;
...
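Note: tc chain ids are 32-bit and can exceed 65535; storing one in a u16 attribute silently truncates it, so two distinct chains could map to the same vport table. A runnable demonstration of the truncation the type change prevents:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t chain = 70000; /* a valid tc chain id above U16_MAX */
            uint16_t narrow = (uint16_t)chain;

            printf("u32 chain=%u, stored as u16=%u (truncated: %s)\n",
                   chain, narrow, chain == narrow ? "no" : "yes");
            return 0;
    }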
@@ -382,10 +382,11 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
 {
 	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
-	dest[dest_idx].vport.vhca_id =
-		MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
-	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+		dest[dest_idx].vport.vhca_id =
+			MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
 		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+	}
 	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
 		if (pkt_reformat) {
 			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -2367,6 +2368,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
 
 	switch (event) {
 	case ESW_OFFLOADS_DEVCOM_PAIR:
+		if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev)
+			break;
+
 		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
 		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
 			break;
...
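Note: devcom broadcasts pair events to every registered eswitch, and the added guard makes each eswitch accept only the peer that is the sibling port of the same physical device (as reported by mlx5_get_next_phys_dev). A runnable userspace sketch of that filtering; identifiers are stand-ins for the mlx5 ones:

    #include <stdio.h>

    struct esw { int phys_dev_id; const char *name; };

    /* stand-in for the mlx5_get_next_phys_dev() comparison: peers match
     * only when they belong to the same physical device */
    static int same_phys_dev(const struct esw *a, const struct esw *b)
    {
            return a->phys_dev_id == b->phys_dev_id;
    }

    static void devcom_pair_event(const struct esw *esw, const struct esw *peer)
    {
            if (!same_phys_dev(esw, peer)) { /* the added guard */
                    printf("%s: ignoring pair event from %s (other device)\n",
                           esw->name, peer->name);
                    return;
            }
            printf("%s: pairing with %s\n", esw->name, peer->name);
    }

    int main(void)
    {
            struct esw p0 = { 1, "dev0-port0" }, p1 = { 1, "dev0-port1" };
            struct esw other = { 2, "dev1-port0" };

            devcom_pair_event(&p0, &p1);
            devcom_pair_event(&p0, &other);
            return 0;
    }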
@@ -1024,17 +1024,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
 			      struct fs_prio *prio)
 {
-	struct mlx5_flow_table *next_ft;
+	struct mlx5_flow_table *next_ft, *first_ft;
 	int err = 0;
 
 	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */
 
-	if (list_empty(&prio->node.children)) {
+	first_ft = list_first_entry_or_null(&prio->node.children,
+					    struct mlx5_flow_table, node.list);
+	if (!first_ft || first_ft->level > ft->level) {
 		err = connect_prev_fts(dev, ft, prio);
 		if (err)
 			return err;
 
-		next_ft = find_next_chained_ft(prio);
+		next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
 		err = connect_fwd_rules(dev, ft, next_ft);
 		if (err)
 			return err;
@@ -2120,7 +2122,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft)
 				node.list) == ft))
 		return 0;
 
-	next_ft = find_next_chained_ft(prio);
+	next_ft = find_next_ft(ft);
 	err = connect_fwd_rules(dev, next_ft, ft);
 	if (err)
 		return err;
...
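Note: the chaining fix no longer assumes a new flow table is created past the end of its priority; if it is created in front of the current first table (i.e. at a lower level), it must take over as the head and forward to the old head. A runnable sketch of the new head-selection logic; names and level values are illustrative:

    #include <stdio.h>

    struct ft { int level; };

    /* first = current first table in the prio (NULL if none);
     * returns the table the new head should forward to, or NULL when
     * the new table is not the head and nothing is reconnected */
    static const struct ft *next_in_chain(const struct ft *first,
                                          const struct ft *new_ft,
                                          const struct ft *next_chained)
    {
            if (!first || first->level > new_ft->level)
                    return first ? first : next_chained;
            return NULL;
    }

    int main(void)
    {
            struct ft old_head = { .level = 5 }, new_ft = { .level = 3 };
            struct ft chained = { .level = 9 };
            const struct ft *n;

            n = next_in_chain(&old_head, &new_ft, &chained);
            printf("insert level %d before level %d -> forward to level %d\n",
                   new_ft.level, old_head.level, n->level);

            n = next_in_chain(NULL, &new_ft, &chained);
            printf("empty prio -> forward to chained level %d\n", n->level);
            return 0;
    }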
@@ -626,8 +626,16 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
 	}
 	fw_reporter_ctx.err_synd = health->synd;
 	fw_reporter_ctx.miss_counter = health->miss_counter;
-	devlink_health_report(health->fw_fatal_reporter,
-			      "FW fatal error reported", &fw_reporter_ctx);
+	if (devlink_health_report(health->fw_fatal_reporter,
+				  "FW fatal error reported", &fw_reporter_ctx) == -ECANCELED) {
+		/* If recovery wasn't performed, due to grace period,
+		 * unload the driver. This ensures that the driver
+		 * closes all its resources and it is not subjected to
+		 * requests from the kernel.
+		 */
+		mlx5_core_err(dev, "Driver is in error state. Unloading\n");
+		mlx5_unload_one(dev);
+	}
 }
 
 static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
...
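Note: devlink_health_report() returns -ECANCELED when auto-recovery is skipped because the reporter is still inside its grace period; the fix reacts by unloading the device instead of leaving it half-alive. A runnable userspace sketch of that control flow; the grace-period bookkeeping is a simplified stand-in for devlink's:

    #include <errno.h>
    #include <stdio.h>

    static long last_recovery_ms = -1;

    /* stand-in for devlink_health_report(): skip recovery while still
     * within the grace period since the last successful recovery */
    static int health_report(long now_ms, long grace_ms)
    {
            if (last_recovery_ms >= 0 && now_ms - last_recovery_ms < grace_ms)
                    return -ECANCELED; /* recovery skipped */
            last_recovery_ms = now_ms;
            return 0; /* recovery performed */
    }

    int main(void)
    {
            long grace_ms = 500;
            long events[] = { 0, 100, 900 };

            for (unsigned i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
                    if (health_report(events[i], grace_ms) == -ECANCELED)
                            printf("t=%ldms: in grace period -> unload driver\n",
                                   events[i]);
                    else
                            printf("t=%ldms: recovery performed\n", events[i]);
            }
            return 0;
    }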