Commit 537d0779 authored by David S. Miller

Merge tag 'mlx5-fixes-2019-12-05' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2019-12-05

This series introduces some fixes to the mlx5 driver.

Please pull and let me know if there is any problem.

For -stable v4.19:
 ('net/mlx5e: Query global pause state before setting prio2buffer')

For -stable v5.3:
 ('net/mlx5e: Fix SFF 8472 eeprom length')
 ('net/mlx5e: Fix translation of link mode into speed')
 ('net/mlx5e: Fix freeing flow with kfree() and not kvfree()')
 ('net/mlx5e: ethtool, Fix analysis of speed setting')
 ('net/mlx5e: Fix TXQ indices to be sequential')
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 04aa1bc4 b7826076
@@ -816,7 +816,7 @@ struct mlx5e_xsk {
 struct mlx5e_priv {
 	/* priv data path fields - start */
 	struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
-	int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+	int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 	struct mlx5e_dcbx_dp dcbx_dp;
 #endif
...
@@ -73,6 +73,7 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
 	[MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000,
 	[MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000,
 	[MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
+	[MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000,
 	[MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
 	[MLX5E_400GAUI_8] = 400000,
 };
...
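The "Fix translation of link mode into speed" hunk above adds the missing MLX5E_100GAUI_2_100GBASE_CR2_KR2 entry. A standalone sketch (demo-only enum names, not driver code) of the failure mode it fixes: a sparse designated-initializer table leaves unmapped modes at 0, so a lookup for the missing mode reports an unknown speed:

#include <stdio.h>

enum link_mode { MODE_100G_CAUI4, MODE_100G_GAUI2, MODE_200G_GAUI4, MODE_MAX };

static const unsigned int link_speed[MODE_MAX] = {
	[MODE_100G_CAUI4] = 100000,
	/* MODE_100G_GAUI2 left out on purpose: its entry stays 0 */
	[MODE_200G_GAUI4] = 200000,
};

int main(void)
{
	printf("CAUI-4 100G: %u Mbps\n", link_speed[MODE_100G_CAUI4]);
	printf("GAUI-2 100G: %u Mbps (0 means speed unknown)\n",
	       link_speed[MODE_100G_GAUI2]);
	return 0;
}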
@@ -155,8 +155,11 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
 		}
 		if (port_buffer->buffer[i].size <
-		    (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+		    (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) {
+			pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
+			       i, port_buffer->buffer[i].size);
 			return -ENOMEM;
+		}
 
 		port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
 		port_buffer->buffer[i].xon =
@@ -232,6 +235,26 @@ static int update_buffer_lossy(unsigned int max_mtu,
 	return 0;
 }
 
+static int fill_pfc_en(struct mlx5_core_dev *mdev, u8 *pfc_en)
+{
+	u32 g_rx_pause, g_tx_pause;
+	int err;
+
+	err = mlx5_query_port_pause(mdev, &g_rx_pause, &g_tx_pause);
+	if (err)
+		return err;
+
+	/* If global pause enabled, set all active buffers to lossless.
+	 * Otherwise, check PFC setting.
+	 */
+	if (g_rx_pause || g_tx_pause)
+		*pfc_en = 0xff;
+	else
+		err = mlx5_query_port_pfc(mdev, pfc_en, NULL);
+
+	return err;
+}
+
 #define MINIMUM_MAX_MTU 9216
 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 				    u32 change, unsigned int mtu,
@@ -277,7 +300,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 
 	if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
 		update_prio2buffer = true;
-		err = mlx5_query_port_pfc(priv->mdev, &curr_pfc_en, NULL);
+		err = fill_pfc_en(priv->mdev, &curr_pfc_en);
 		if (err)
 			return err;
...
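The new fill_pfc_en() helper above makes the prio2buffer path honor global pause: when global pause is enabled, every active buffer must be treated as lossless, regardless of the per-priority PFC bitmap. A userspace sketch of that decision, with the query_ stubs standing in for mlx5_query_port_pause() and mlx5_query_port_pfc():

#include <stdint.h>
#include <stdio.h>

static int query_global_pause(uint32_t *rx, uint32_t *tx) { *rx = 1; *tx = 0; return 0; }
static int query_pfc(uint8_t *pfc_en) { *pfc_en = 0x0c; return 0; } /* prios 2,3 */

static int fill_pfc_en(uint8_t *pfc_en)
{
	uint32_t g_rx_pause, g_tx_pause;
	int err = query_global_pause(&g_rx_pause, &g_tx_pause);

	if (err)
		return err;
	/* Global pause makes every priority lossless; otherwise fall
	 * back to the per-priority PFC bitmap.
	 */
	if (g_rx_pause || g_tx_pause)
		*pfc_en = 0xff;
	else
		err = query_pfc(pfc_en);
	return err;
}

int main(void)
{
	uint8_t pfc_en = 0;

	if (!fill_pfc_en(&pfc_en))
		printf("effective pfc_en bitmap: 0x%02x\n", pfc_en);
	return 0;
}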
@@ -1027,18 +1027,11 @@ static bool ext_link_mode_requested(const unsigned long *adver)
 	return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
 }
 
-static bool ext_speed_requested(u32 speed)
-{
-#define MLX5E_MAX_PTYS_LEGACY_SPEED 100000
-	return !!(speed > MLX5E_MAX_PTYS_LEGACY_SPEED);
-}
-
-static bool ext_requested(u8 autoneg, const unsigned long *adver, u32 speed)
+static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_supported)
 {
 	bool ext_link_mode = ext_link_mode_requested(adver);
-	bool ext_speed = ext_speed_requested(speed);
 
-	return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_speed;
+	return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported;
 }
 
 int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
@@ -1065,8 +1058,8 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
 
 	autoneg = link_ksettings->base.autoneg;
 	speed = link_ksettings->base.speed;
-	ext = ext_requested(autoneg, adver, speed),
 	ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+	ext = ext_requested(autoneg, adver, ext_supported);
 	if (!ext_supported && ext)
 		return -EOPNOTSUPP;
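The "Fix analysis of speed setting" hunk above stops guessing extended-mode use from the requested speed: with autoneg on, the advertised link-mode mask decides; with autoneg off, the device's extended-PTYS capability decides. A minimal sketch of the resulting behavior (AUTONEG_ENABLE/AUTONEG_DISABLE mirror the ethtool uapi values):

#include <stdbool.h>
#include <stdio.h>

#define AUTONEG_DISABLE 0
#define AUTONEG_ENABLE  1

static bool ext_requested(int autoneg, bool ext_link_mode, bool ext_supported)
{
	return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported;
}

int main(void)
{
	/* autoneg off on extended-capable hardware: use extended PTYS
	 * regardless of speed (the old code inferred it from
	 * speed > 100000 Mbps, mishandling extended modes at or below 100G)
	 */
	printf("%d\n", ext_requested(AUTONEG_DISABLE, false, true)); /* 1 */
	/* autoneg on: the advertised link-mode mask decides */
	printf("%d\n", ext_requested(AUTONEG_ENABLE, false, true));  /* 0 */
	return 0;
}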
@@ -1643,7 +1636,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
 		break;
 	case MLX5_MODULE_ID_SFP:
 		modinfo->type = ETH_MODULE_SFF_8472;
-		modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
+		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
 		break;
 	default:
 		netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
...
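The SFF-8472 hunk above fixes the reported EEPROM size: an SFF-8472 SFP exposes two 256-byte pages (A0h identification plus A2h diagnostics), so ethtool defines ETH_MODULE_SFF_8472_LEN as 512, while MLX5_EEPROM_PAGE_LENGTH covers a single 256-byte page, so ethtool -m truncated the diagnostics half. A sketch with the two constants (values mirror <linux/ethtool.h> and the driver's EEPROM page size):

#include <stdio.h>

#define MLX5_EEPROM_PAGE_LENGTH 256 /* one EEPROM page */
#define ETH_MODULE_SFF_8472_LEN 512 /* A0h + A2h pages */

int main(void)
{
	printf("bytes readable before the fix: %d\n", MLX5_EEPROM_PAGE_LENGTH);
	printf("bytes readable after the fix:  %d\n", ETH_MODULE_SFF_8472_LEN);
	return 0;
}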
@@ -1691,11 +1691,10 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
 			  struct mlx5e_params *params,
 			  struct mlx5e_channel_param *cparam)
 {
-	struct mlx5e_priv *priv = c->priv;
 	int err, tc;
 
 	for (tc = 0; tc < params->num_tc; tc++) {
-		int txq_ix = c->ix + tc * priv->max_nch;
+		int txq_ix = c->ix + tc * params->num_channels;
 
 		err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
 				       params, &cparam->sq, &c->sq[tc], tc);
@@ -2876,26 +2875,21 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
 		netdev_set_tc_queue(netdev, tc, nch, 0);
 }
 
-static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
+static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
 {
-	int i, tc;
+	int i, ch;
 
-	for (i = 0; i < priv->max_nch; i++)
-		for (tc = 0; tc < priv->profile->max_tc; tc++)
-			priv->channel_tc2txq[i][tc] = i + tc * priv->max_nch;
-}
+	ch = priv->channels.num;
 
-static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
-{
-	struct mlx5e_channel *c;
-	struct mlx5e_txqsq *sq;
-	int i, tc;
+	for (i = 0; i < ch; i++) {
+		int tc;
 
-	for (i = 0; i < priv->channels.num; i++) {
-		c = priv->channels.c[i];
-		for (tc = 0; tc < c->num_tc; tc++) {
-			sq = &c->sq[tc];
+		for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+			struct mlx5e_channel *c = priv->channels.c[i];
+			struct mlx5e_txqsq *sq = &c->sq[tc];
+
 			priv->txq2sq[sq->txq_ix] = sq;
+			priv->channel_tc2realtxq[i][tc] = i + tc * ch;
 		}
 	}
 }
@@ -2910,7 +2904,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
 	netif_set_real_num_tx_queues(netdev, num_txqs);
 	netif_set_real_num_rx_queues(netdev, num_rxqs);
 
-	mlx5e_build_tx2sq_maps(priv);
+	mlx5e_build_txq_maps(priv);
 	mlx5e_activate_channels(&priv->channels);
 	mlx5e_xdp_tx_enable(priv);
 	netif_tx_start_all_queues(priv->netdev);
@@ -5021,7 +5015,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
 	if (err)
 		mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
 	mlx5e_build_nic_netdev(netdev);
-	mlx5e_build_tc2txq_maps(priv);
 	mlx5e_health_create_reporters(priv);
 
 	return 0;
...
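The TXQ hunks above index send queues by the active channel count (params->num_channels / priv->channels.num) instead of the maximum (priv->max_nch), which keeps txq indices sequential when fewer channels than the maximum are open. A standalone sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
	int num_channels = 2, max_nch = 4, num_tc = 2;

	/* txq_ix = ch + tc * <stride>: striding by max_nch leaves gaps
	 * (0,1,4,5); striding by the active count is dense (0,1,2,3)
	 */
	for (int tc = 0; tc < num_tc; tc++)
		for (int ch = 0; ch < num_channels; ch++)
			printf("ch=%d tc=%d: old txq_ix=%d, new txq_ix=%d\n",
			       ch, tc,
			       ch + tc * max_nch,
			       ch + tc * num_channels);
	return 0;
}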
@@ -1601,7 +1601,7 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
 			for (j = 0; j < NUM_SQ_STATS; j++)
 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
 					sq_stats_desc[j].format,
-					priv->channel_tc2txq[i][tc]);
+					i + tc * max_nch);
 
 	for (i = 0; i < max_nch; i++) {
 		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
...
@@ -1626,8 +1626,11 @@ static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
 	flow_flag_clear(flow, DUP);
 
-	mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
-	kvfree(flow->peer_flow);
+	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
+		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
+		kfree(flow->peer_flow);
+	}
+
 	flow->peer_flow = NULL;
 }
...
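The hunk above tears down the peer flow only on the last reference drop and frees it with kfree(), matching its kzalloc()-family allocation (kvfree() pairs with kvzalloc()). A userspace model of the pattern, with C11 atomics standing in for refcount_dec_and_test():

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct flow {
	atomic_int refcnt;
	const char *name;
};

static struct flow *flow_get(struct flow *f)
{
	atomic_fetch_add(&f->refcnt, 1);
	return f;
}

static void flow_put(struct flow *f)
{
	if (atomic_fetch_sub(&f->refcnt, 1) == 1) { /* last reference */
		printf("tearing down %s\n", f->name);
		free(f); /* pairs with the calloc() below */
	}
}

int main(void)
{
	struct flow *f = calloc(1, sizeof(*f));

	f->name = "peer_flow";
	atomic_init(&f->refcnt, 1);
	flow_get(f); /* second owner */
	flow_put(f); /* not freed yet */
	flow_put(f); /* last put frees */
	return 0;
}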
@@ -93,7 +93,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 	if (txq_ix >= num_channels)
 		txq_ix = priv->txq2sq[txq_ix]->ch_ix;
 
-	return priv->channel_tc2txq[txq_ix][up];
+	return priv->channel_tc2realtxq[txq_ix][up];
 }
 
 static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
...
@@ -81,7 +81,14 @@ struct vport_ingress {
 		struct mlx5_fc *drop_counter;
 	} legacy;
 	struct {
-		struct mlx5_flow_group *metadata_grp;
+		/* Optional group to add an FTE to do internal priority
+		 * tagging on ingress packets.
+		 */
+		struct mlx5_flow_group *metadata_prio_tag_grp;
+		/* Group to add default match-all FTE entry to tag ingress
+		 * packet with metadata.
+		 */
+		struct mlx5_flow_group *metadata_allmatch_grp;
 		struct mlx5_modify_hdr *modify_metadata;
 		struct mlx5_flow_handle *modify_metadata_rule;
 	} offloads;
...
@@ -88,6 +88,14 @@ u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
 	return 1;
 }
 
+static bool
+esw_check_ingress_prio_tag_enabled(const struct mlx5_eswitch *esw,
+				   const struct mlx5_vport *vport)
+{
+	return (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
+		mlx5_eswitch_is_vf_vport(esw, vport->vport));
+}
+
 static void
 mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
 				  struct mlx5_flow_spec *spec,
@@ -1760,12 +1768,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
 	 * required, allow
 	 * Unmatched traffic is allowed by default
 	 */
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-	if (!spec) {
-		err = -ENOMEM;
-		goto out_no_mem;
-	}
+	if (!spec)
+		return -ENOMEM;
 
 	/* Untagged packets - push prio tag VLAN, allow */
 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
@@ -1791,14 +1796,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
 			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
 			 vport->vport, err);
 		vport->ingress.allow_rule = NULL;
-		goto out;
 	}
 
-out:
 	kvfree(spec);
-out_no_mem:
-	if (err)
-		esw_vport_cleanup_ingress_rules(esw, vport);
 	return err;
 }
@@ -1836,13 +1836,9 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
 		esw_warn(esw->dev,
 			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
 			 vport->vport, err);
+		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
 		vport->ingress.offloads.modify_metadata_rule = NULL;
-		goto out;
 	}
 
-out:
-	if (err)
-		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
 	return err;
 }
@@ -1862,50 +1858,103 @@ static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 	struct mlx5_flow_group *g;
+	void *match_criteria;
 	u32 *flow_group_in;
+	u32 flow_index = 0;
 	int ret = 0;
 
 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
 	if (!flow_group_in)
 		return -ENOMEM;
 
-	memset(flow_group_in, 0, inlen);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
-
-	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
-	if (IS_ERR(g)) {
-		ret = PTR_ERR(g);
-		esw_warn(esw->dev,
-			 "Failed to create vport[%d] ingress metadata group, err(%d)\n",
-			 vport->vport, ret);
-		goto grp_err;
+	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
+		/* This group is to hold FTE to match untagged packets when prio_tag
+		 * is enabled.
+		 */
+		memset(flow_group_in, 0, inlen);
+
+		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+					      flow_group_in, match_criteria);
+		MLX5_SET(create_flow_group_in, flow_group_in,
+			 match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
+		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+		if (IS_ERR(g)) {
+			ret = PTR_ERR(g);
+			esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
+				 vport->vport, ret);
+			goto prio_tag_err;
+		}
+		vport->ingress.offloads.metadata_prio_tag_grp = g;
+		flow_index++;
+	}
+
+	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+		/* This group holds an FTE with no matches for add metadata for
+		 * tagged packets, if prio-tag is enabled (as a fallthrough),
+		 * or all traffic in case prio-tag is disabled.
+		 */
+		memset(flow_group_in, 0, inlen);
+		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
+		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
+
+		g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+		if (IS_ERR(g)) {
+			ret = PTR_ERR(g);
+			esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n",
+				 vport->vport, ret);
+			goto metadata_err;
+		}
+		vport->ingress.offloads.metadata_allmatch_grp = g;
 	}
-	vport->ingress.offloads.metadata_grp = g;
-grp_err:
+
 	kvfree(flow_group_in);
+	return 0;
+
+metadata_err:
+	if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) {
+		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
+		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
+	}
+prio_tag_err:
+	kvfree(flow_group_in);
 	return ret;
 }
 
 static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
 {
-	if (vport->ingress.offloads.metadata_grp) {
-		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp);
-		vport->ingress.offloads.metadata_grp = NULL;
+	if (vport->ingress.offloads.metadata_allmatch_grp) {
		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp);
+		vport->ingress.offloads.metadata_allmatch_grp = NULL;
+	}
+
+	if (vport->ingress.offloads.metadata_prio_tag_grp) {
+		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp);
+		vport->ingress.offloads.metadata_prio_tag_grp = NULL;
 	}
 }
 
 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 				    struct mlx5_vport *vport)
 {
+	int num_ftes = 0;
 	int err;
 
 	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
-	    !MLX5_CAP_GEN(esw->dev, prio_tag_required))
+	    !esw_check_ingress_prio_tag_enabled(esw, vport))
 		return 0;
 
 	esw_vport_cleanup_ingress_rules(esw, vport);
-	err = esw_vport_create_ingress_acl_table(esw, vport, 1);
+
+	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
+		num_ftes++;
+	if (esw_check_ingress_prio_tag_enabled(esw, vport))
+		num_ftes++;
+
+	err = esw_vport_create_ingress_acl_table(esw, vport, num_ftes);
 	if (err) {
 		esw_warn(esw->dev,
 			 "failed to enable ingress acl (%d) on vport[%d]\n",
@@ -1926,8 +1975,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 		goto metadata_err;
 	}
 
-	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
-	    mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
+	if (esw_check_ingress_prio_tag_enabled(esw, vport)) {
 		err = esw_vport_ingress_prio_tag_config(esw, vport);
 		if (err)
 			goto prio_tag_err;
@@ -1937,7 +1985,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 prio_tag_err:
 	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
 metadata_err:
-	esw_vport_cleanup_ingress_rules(esw, vport);
 	esw_vport_destroy_ingress_acl_group(vport);
 group_err:
 	esw_vport_destroy_ingress_acl_table(vport);
@@ -2008,8 +2055,9 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
 	if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
 		err = esw_vport_egress_config(esw, vport);
 		if (err) {
-			esw_vport_del_ingress_acl_modify_metadata(esw, vport);
 			esw_vport_cleanup_ingress_rules(esw, vport);
+			esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+			esw_vport_destroy_ingress_acl_group(vport);
 			esw_vport_destroy_ingress_acl_table(vport);
 		}
 	}
@@ -2021,8 +2069,8 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
 				      struct mlx5_vport *vport)
 {
 	esw_vport_disable_egress_acl(esw, vport);
-	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
 	esw_vport_cleanup_ingress_rules(esw, vport);
+	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
 	esw_vport_destroy_ingress_acl_group(vport);
 	esw_vport_destroy_ingress_acl_table(vport);
 }
...
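The eswitch hunks above restructure error handling so each function undoes only what it created, and callers unwind in reverse creation order through goto labels. A generic standalone sketch of that idiom (the create/destroy stubs are placeholders, not mlx5 APIs):

#include <stdio.h>

static int create_table(void) { puts("create table"); return 0; }
static int create_group(void) { puts("create group"); return 0; }
static int create_rules(void) { puts("create rules"); return -1; } /* simulate failure */
static void destroy_table(void) { puts("destroy table"); }
static void destroy_group(void) { puts("destroy group"); }

static int ingress_config(void)
{
	int err;

	err = create_table();
	if (err)
		return err;
	err = create_group();
	if (err)
		goto group_err;
	err = create_rules();
	if (err)
		goto rules_err;
	return 0;

	/* each label releases only what succeeded, newest first */
rules_err:
	destroy_group();
group_err:
	destroy_table();
	return err;
}

int main(void)
{
	printf("ingress_config() = %d\n", ingress_config());
	return 0;
}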