Commit 21947f46 authored by David S. Miller

Merge tag 'mlx5-updates-2019-07-29' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2019-07-29

This series includes updates to the mlx5 driver:
1) Simplifications, cleanups, and warning print improvements

2) From Vlad Buslov:
Refactor mlx5 tc flow handling for unlocked execution (Part 1)

Currently, all cls API hardware offload driver callbacks require the
caller to hold the rtnl lock when calling them. The cls API has already
been updated to update software filters in parallel (on classifiers
that support unlocked execution); however, the hardware offloads code
still obtains the rtnl lock before calling driver tc callbacks. This
set implements partial support for unlocked execution, which is
leveraged by follow-up refactorings in specific mlx5 tc subsystems and
by a follow-up cls API patch that allows drivers to register their
callbacks as rtnl-unlocked.

In the mlx5 tc code, mlx5e_tc_flow is the main structure used to
represent a tc filter. Currently, the structure itself and its handlers
in both the tc and eswitch layers do not implement any kind of
synchronization and instead assume external global synchronization
provided by the rtnl lock. Implement the following changes to remove
the dependency on the rtnl lock in the flow handling code, intended as
groundwork for later changes that make mlx5 tc fully rtnl-independent:

- Extend struct mlx5e_tc_flow with an atomic reference counter and rcu
  to allow concurrent access from multiple tc and neigh update
  workqueue instances without introducing any additional locks specific
  to the structure. Change its 'flags' field to atomic bitmask ops,
  which is necessary for a tc instance to interact with other
  concurrent tc instances or with a concurrent neigh update that needs
  to skip flows that are not fully initialized (new INIT_DONE flow
  flag) and can change the flags according to neighbor state (flipping
  the OFFLOADED flag). A sketch of this pattern follows the list.

- Protect the unready flows list with a new
  uplink_priv->unready_flows_lock mutex.

- Convert calls to netdev APIs that require the rtnl lock in the flow
  handling code to their rcu counterparts.

- Make the eswitch code that is called from the tc layer, and that
  previously assumed implicit external synchronization, concurrency
  safe: change the esw->offloads.num_flows type to an atomic integer
  and rearrange esw->state_lock usage to protect additional data.
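
To make the intent of the first two items concrete, here is a minimal,
self-contained sketch of the pattern under the stated assumptions. The
toy_flow structure and its helpers are hypothetical stand-ins for
illustration, not the driver's actual code:

  /* Hypothetical toy_flow: refcount + rcu for lifetime, atomic bit ops
   * for flags, and a dedicated mutex guarding only the unready list.
   */
  #include <linux/kernel.h>
  #include <linux/slab.h>
  #include <linux/rcupdate.h>
  #include <linux/refcount.h>
  #include <linux/bitops.h>
  #include <linux/mutex.h>
  #include <linux/list.h>

  enum {
          TOY_FLOW_INIT_DONE_BIT,
          TOY_FLOW_OFFLOADED_BIT,
  };

  struct toy_flow {
          unsigned long flags;           /* atomic {set,clear,test}_bit() */
          refcount_t refcnt;             /* each concurrent user holds a ref */
          struct rcu_head rcu;           /* free is deferred past rcu readers */
          struct list_head unready_list; /* protected by unready_lock below */
  };

  static DEFINE_MUTEX(unready_lock);     /* fine-grained: guards list only */
  static LIST_HEAD(unready_flows);

  /* Fails when racing with the final put of the flow. */
  static struct toy_flow *toy_flow_get(struct toy_flow *flow)
  {
          return refcount_inc_not_zero(&flow->refcnt) ? flow : NULL;
  }

  static void toy_flow_put(struct toy_flow *flow)
  {
          if (refcount_dec_and_test(&flow->refcnt))
                  kfree_rcu(flow, rcu);
  }

  /* neigh-update-style user: skip half-initialized flows, flip OFFLOADED */
  static void toy_flow_neigh_update(struct toy_flow *flow, bool connected)
  {
          if (!test_bit(TOY_FLOW_INIT_DONE_BIT, &flow->flags))
                  return;
          if (connected)
                  set_bit(TOY_FLOW_OFFLOADED_BIT, &flow->flags);
          else
                  clear_bit(TOY_FLOW_OFFLOADED_BIT, &flow->flags);
  }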

Some of the approaches to synchronization presented in this patch set
are quite complicated (lockless concurrent use of data structures with
rcu and reference counting, fine-grained locking where necessary, retry
mechanisms to handle concurrent insertion of another instance of a data
structure with the same key, etc.). This is necessary to allow calling
the firmware in parallel in most cases, which is the main motivation of
this change, since firmware calls are a much heavier operation than
atomic operations, a multitude of locks, and potential multiple retries
during concurrent access to the same elements.
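
To illustrate the retry mechanism mentioned above, here is a hedged
sketch of lookup-or-insert on an rhashtable under concurrent insertion
and removal; the toy_entry names are hypothetical, not driver code:

  #include <linux/kernel.h>
  #include <linux/err.h>
  #include <linux/slab.h>
  #include <linux/refcount.h>
  #include <linux/rhashtable.h>

  struct toy_entry {
          struct rhash_head node;
          u32 key;
          refcount_t refcnt;
  };

  static const struct rhashtable_params toy_params = {
          .head_offset = offsetof(struct toy_entry, node),
          .key_offset = offsetof(struct toy_entry, key),
          .key_len = sizeof(u32),
          .automatic_shrinking = true,
  };

  static struct toy_entry *toy_get_or_create(struct rhashtable *ht, u32 key)
  {
          struct toy_entry *e;
          int err;

  retry:
          rcu_read_lock();
          e = rhashtable_lookup(ht, &key, toy_params);
          /* The entry may be concurrently removed; use it only if we
           * can still take a reference.
           */
          if (e && !refcount_inc_not_zero(&e->refcnt))
                  e = NULL;
          rcu_read_unlock();
          if (e)
                  return e;

          e = kzalloc(sizeof(*e), GFP_KERNEL);
          if (!e)
                  return ERR_PTR(-ENOMEM);
          e->key = key;
          refcount_set(&e->refcnt, 1);

          err = rhashtable_lookup_insert_fast(ht, &e->node, toy_params);
          if (err) {
                  kfree(e);
                  if (err == -EEXIST)
                          goto retry; /* lost the race; use the winner's */
                  return ERR_PTR(err);
          }
          return e;
  }
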
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6a7ce95d b6fac0b4
@@ -10,6 +10,8 @@ enum {
 };
 
 struct mlx5e_tc_table {
+        /* protects flow table */
+        struct mutex t_lock;
         struct mlx5_flow_table *t;
 
         struct rhashtable ht;
@@ -132,12 +134,17 @@ struct mlx5e_ethtool_steering {
 void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
 void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
-int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
-int mlx5e_get_rxnfc(struct net_device *dev,
-                    struct ethtool_rxnfc *info, u32 *rule_locs);
+int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
+int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+                            struct ethtool_rxnfc *info, u32 *rule_locs);
 #else
 static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
 static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
+static inline int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{ return -EOPNOTSUPP; }
+static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+                                          struct ethtool_rxnfc *info, u32 *rule_locs)
+{ return -EOPNOTSUPP; }
 #endif /* CONFIG_MLX5_EN_RXNFC */
 
 #ifdef CONFIG_MLX5_EN_ARFS
...
@@ -31,29 +31,36 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
         real_dev = is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : dev;
         uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
-        uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+
+        rcu_read_lock();
+        uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
+        /* mlx5_lag_is_sriov() is a blocking function which can't be called
+         * while holding rcu read lock. Take the net_device for correctness
+         * sake.
+         */
+        if (uplink_upper)
+                dev_hold(uplink_upper);
+        rcu_read_unlock();
+
         dst_is_lag_dev = (uplink_upper &&
                           netif_is_lag_master(uplink_upper) &&
                           real_dev == uplink_upper &&
                           mlx5_lag_is_sriov(priv->mdev));
+        if (uplink_upper)
+                dev_put(uplink_upper);
 
         /* if the egress device isn't on the same HW e-switch or
          * it's a LAG device, use the uplink
          */
+        *route_dev = dev;
         if (!netdev_port_same_parent_id(priv->netdev, real_dev) ||
-            dst_is_lag_dev) {
-                *route_dev = dev;
-                *out_dev = uplink_dev;
-        } else {
-                *route_dev = dev;
-                if (is_vlan_dev(*route_dev))
-                        *out_dev = uplink_dev;
-                else if (mlx5e_eswitch_rep(dev) &&
-                         mlx5e_is_valid_eswitch_fwd_dev(priv, dev))
-                        *out_dev = *route_dev;
-                else
-                        return -EOPNOTSUPP;
-        }
+            dst_is_lag_dev || is_vlan_dev(*route_dev))
+                *out_dev = uplink_dev;
+        else if (mlx5e_eswitch_rep(dev) &&
+                 mlx5e_is_valid_eswitch_fwd_dev(priv, dev))
+                *out_dev = *route_dev;
+        else
+                return -EOPNOTSUPP;
 
         if (!(mlx5e_eswitch_rep(*out_dev) &&
               mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
...
@@ -1888,21 +1888,27 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
         return priv->channels.params.pflags;
 }
 
-#ifndef CONFIG_MLX5_EN_RXNFC
-/* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS
- * otherwise this function will be defined from en_fs_ethtool.c
- */
 static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
 {
         struct mlx5e_priv *priv = netdev_priv(dev);
 
-        if (info->cmd != ETHTOOL_GRXRINGS)
-                return -EOPNOTSUPP;
-        /* ring_count is needed by ethtool -x */
-        info->data = priv->channels.params.num_channels;
-        return 0;
+        /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part
+         * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc,
+         * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
+         * is compiled out via CONFIG_MLX5_EN_RXNFC=n.
+         */
+        if (info->cmd == ETHTOOL_GRXRINGS) {
+                info->data = priv->channels.params.num_channels;
+                return 0;
+        }
+
+        return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs);
+}
+
+static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+        return mlx5e_ethtool_set_rxnfc(dev, cmd);
 }
-#endif
 
 const struct ethtool_ops mlx5e_ethtool_ops = {
         .get_drvinfo = mlx5e_get_drvinfo,
@@ -1923,9 +1929,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
         .get_rxfh = mlx5e_get_rxfh,
         .set_rxfh = mlx5e_set_rxfh,
         .get_rxnfc = mlx5e_get_rxnfc,
-#ifdef CONFIG_MLX5_EN_RXNFC
         .set_rxnfc = mlx5e_set_rxnfc,
-#endif
         .get_tunable = mlx5e_get_tunable,
         .set_tunable = mlx5e_set_tunable,
         .get_pauseparam = mlx5e_get_pauseparam,
...
@@ -887,10 +887,10 @@ static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
         return 0;
 }
 
-int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 {
-        int err = 0;
         struct mlx5e_priv *priv = netdev_priv(dev);
+        int err = 0;
 
         switch (cmd->cmd) {
         case ETHTOOL_SRXCLSRLINS:
@@ -910,16 +910,13 @@ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
         return err;
 }
 
-int mlx5e_get_rxnfc(struct net_device *dev,
-                    struct ethtool_rxnfc *info, u32 *rule_locs)
+int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+                            struct ethtool_rxnfc *info, u32 *rule_locs)
 {
         struct mlx5e_priv *priv = netdev_priv(dev);
         int err = 0;
 
         switch (info->cmd) {
-        case ETHTOOL_GRXRINGS:
-                info->data = priv->channels.params.num_channels;
-                break;
         case ETHTOOL_GRXCLSRLCNT:
                 info->rule_cnt = priv->fs.ethtool.tot_num_rules;
                 break;
...
@@ -3429,7 +3429,7 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
 #ifdef CONFIG_MLX5_ESWITCH
 static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
                                      struct flow_cls_offload *cls_flower,
-                                     int flags)
+                                     unsigned long flags)
 {
         switch (cls_flower->command) {
         case FLOW_CLS_REPLACE:
@@ -3449,12 +3449,12 @@ static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
 static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                                    void *cb_priv)
 {
+        unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
         struct mlx5e_priv *priv = cb_priv;
 
         switch (type) {
         case TC_SETUP_CLSFLOWER:
-                return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
-                                                 MLX5E_TC_NIC_OFFLOAD);
+                return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
         default:
                 return -EOPNOTSUPP;
         }
@@ -3647,7 +3647,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
 {
         struct mlx5e_priv *priv = netdev_priv(netdev);
 
-        if (!enable && mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD)) {
+        if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
                 netdev_err(netdev,
                            "Active offloaded tc filters, can't turn hw_tc_offload off\n");
                 return -EINVAL;
@@ -3788,9 +3788,10 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
                 netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
         }
         if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
-                features &= ~NETIF_F_LRO;
-                if (params->lro_en)
+                if (features & NETIF_F_LRO) {
                         netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
+                        features &= ~NETIF_F_LRO;
+                }
         }
 
         if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
@@ -3957,7 +3958,8 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
         case HWTSTAMP_FILTER_NTP_ALL:
                 /* Disable CQE compression */
-                netdev_warn(priv->netdev, "Disabling cqe compression");
+                if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
+                        netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
                 err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
                 if (err) {
                         netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
...
@@ -659,8 +659,8 @@ mlx5e_rep_indr_offload(struct net_device *netdev,
                        struct flow_cls_offload *flower,
                        struct mlx5e_rep_indr_block_priv *indr_priv)
 {
+        unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
         struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
-        int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD;
         int err = 0;
 
         switch (flower->command) {
@@ -722,10 +722,6 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
         if (indr_priv)
                 return -EEXIST;
 
-        if (flow_block_cb_is_busy(mlx5e_rep_indr_setup_block_cb,
-                                  indr_priv, &mlx5e_block_cb_list))
-                return -EBUSY;
-
         indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
         if (!indr_priv)
                 return -ENOMEM;
@@ -1163,12 +1159,12 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
 static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
                                  void *cb_priv)
 {
+        unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
         struct mlx5e_priv *priv = cb_priv;
 
         switch (type) {
         case TC_SETUP_CLSFLOWER:
-                return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
-                                                     MLX5E_TC_ESW_OFFLOAD);
+                return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
         default:
                 return -EOPNOTSUPP;
         }
@@ -1564,6 +1560,7 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
         if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
                 uplink_priv = &rpriv->uplink_priv;
 
+                mutex_init(&uplink_priv->unready_flows_lock);
                 INIT_LIST_HEAD(&uplink_priv->unready_flows);
 
                 /* init shared tc flow table */
@@ -1608,6 +1605,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
                 /* delete shared tc flow table */
                 mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
+                mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
         }
 }
...
@@ -75,6 +75,8 @@ struct mlx5_rep_uplink_priv {
         struct mlx5_tun_entropy tun_entropy;
 
+        /* protects unready_flows */
+        struct mutex unready_flows_lock;
         struct list_head unready_flows;
         struct work_struct reoffload_flows_work;
 };
...
@@ -40,13 +40,15 @@
 #ifdef CONFIG_MLX5_ESWITCH
 
 enum {
-        MLX5E_TC_INGRESS = BIT(0),
-        MLX5E_TC_EGRESS = BIT(1),
-        MLX5E_TC_NIC_OFFLOAD = BIT(2),
-        MLX5E_TC_ESW_OFFLOAD = BIT(3),
-        MLX5E_TC_LAST_EXPORTED_BIT = 3,
+        MLX5E_TC_FLAG_INGRESS_BIT,
+        MLX5E_TC_FLAG_EGRESS_BIT,
+        MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
+        MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+        MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
 };
 
+#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
+
 int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
 void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
@@ -54,12 +56,12 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
 void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
 
 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
-                           struct flow_cls_offload *f, int flags);
+                           struct flow_cls_offload *f, unsigned long flags);
 int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
-                        struct flow_cls_offload *f, int flags);
+                        struct flow_cls_offload *f, unsigned long flags);
 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
-                       struct flow_cls_offload *f, int flags);
+                       struct flow_cls_offload *f, unsigned long flags);
 
 struct mlx5e_encap_entry;
 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -70,7 +72,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
 struct mlx5e_neigh_hash_entry;
 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
 
-int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags);
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
 
 void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
@@ -80,7 +82,11 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
 #else /* CONFIG_MLX5_ESWITCH */
 static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
 static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
-static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags) { return 0; }
+static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv,
+                                       unsigned long flags)
+{
+        return 0;
+}
 #endif
 
 #endif /* __MLX5_EN_TC_H__ */
...
@@ -1933,6 +1933,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
         hash_init(esw->offloads.encap_tbl);
         hash_init(esw->offloads.mod_hdr_tbl);
+        atomic64_set(&esw->offloads.num_flows, 0);
         mutex_init(&esw->state_lock);
 
         mlx5_esw_for_all_vports(esw, i, vport) {
@@ -2085,23 +2086,19 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
         if (vlan > 4095 || qos > 7)
                 return -EINVAL;
 
-        mutex_lock(&esw->state_lock);
-
         err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
         if (err)
-                goto unlock;
+                return err;
 
         evport->info.vlan = vlan;
         evport->info.qos = qos;
         if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
                 err = esw_vport_ingress_config(esw, evport);
                 if (err)
-                        goto unlock;
+                        return err;
                 err = esw_vport_egress_config(esw, evport);
         }
 
-unlock:
-        mutex_unlock(&esw->state_lock);
         return err;
 }
@@ -2109,11 +2106,16 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                 u16 vport, u16 vlan, u8 qos)
 {
         u8 set_flags = 0;
+        int err;
 
         if (vlan || qos)
                 set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
 
-        return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+        mutex_lock(&esw->state_lock);
+        err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+        mutex_unlock(&esw->state_lock);
+
+        return err;
 }
 
 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
...
@@ -35,6 +35,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_link.h>
+#include <linux/atomic.h>
 #include <net/devlink.h>
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/eswitch.h>
@@ -179,7 +180,7 @@ struct mlx5_esw_offload {
         struct mutex termtbl_mutex; /* protects termtbl hash */
         const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
         u8 inline_mode;
-        u64 num_flows;
+        atomic64_t num_flows;
         enum devlink_eswitch_encap_mode encap;
 };
...
@@ -233,7 +233,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
         if (IS_ERR(rule))
                 goto err_add_rule;
         else
-                esw->offloads.num_flows++;
+                atomic64_inc(&esw->offloads.num_flows);
 
         return rule;
@@ -298,7 +298,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
         if (IS_ERR(rule))
                 goto add_err;
 
-        esw->offloads.num_flows++;
+        atomic64_inc(&esw->offloads.num_flows);
 
         return rule;
 add_err:
@@ -326,7 +326,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
                         mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
         }
 
-        esw->offloads.num_flows--;
+        atomic64_dec(&esw->offloads.num_flows);
 
         if (fwd_rule) {
                 esw_put_prio_table(esw, attr->chain, attr->prio, 1);
@@ -442,9 +442,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
         fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
                  !attr->dest_chain);
 
+        mutex_lock(&esw->state_lock);
+
         err = esw_add_vlan_action_check(attr, push, pop, fwd);
         if (err)
-                return err;
+                goto unlock;
 
         attr->vlan_handled = false;
@@ -457,11 +459,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
                         attr->vlan_handled = true;
                 }
 
-                return 0;
+                goto unlock;
         }
 
         if (!push && !pop)
-                return 0;
+                goto unlock;
 
         if (!(offloads->vlan_push_pop_refcount)) {
                 /* it's the 1st vlan rule, apply global vlan pop policy */
@@ -486,6 +488,8 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
 out:
         if (!err)
                 attr->vlan_handled = true;
+unlock:
+        mutex_unlock(&esw->state_lock);
         return err;
 }
@@ -508,6 +512,8 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
         pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
         fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
 
+        mutex_lock(&esw->state_lock);
+
         vport = esw_vlan_action_get_vport(attr, push, pop);
 
         if (!push && !pop && fwd) {
@@ -515,7 +521,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
                 if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
                         vport->vlan_refcount--;
 
-                return 0;
+                goto out;
         }
 
         if (push) {
@@ -533,12 +539,13 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
 skip_unset_push:
         offloads->vlan_push_pop_refcount--;
         if (offloads->vlan_push_pop_refcount)
-                return 0;
+                goto out;
 
         /* no more vlan rules, stop global vlan pop policy */
         err = esw_set_global_vlan_pop(esw, 0);
 
 out:
+        mutex_unlock(&esw->state_lock);
         return err;
 }
@@ -2349,7 +2356,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
                 break;
         }
 
-        if (esw->offloads.num_flows > 0) {
+        if (atomic64_read(&esw->offloads.num_flows) > 0) {
                 NL_SET_ERR_MSG_MOD(extack,
                                    "Can't set inline mode when flows are configured");
                 return -EOPNOTSUPP;
@@ -2459,7 +2466,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
         if (esw->offloads.encap == encap)
                 return 0;
 
-        if (esw->offloads.num_flows > 0) {
+        if (atomic64_read(&esw->offloads.num_flows) > 0) {
                 NL_SET_ERR_MSG_MOD(extack,
                                    "Can't set encapsulation when flows are configured");
                 return -EOPNOTSUPP;
...