Commit a3b1933d authored by David S. Miller

Merge tag 'mlx5-fixes-2019-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2019-03-11

For -stable v5.0:
('net/mlx5e: Fix access to non-existing receive queue')
('net/mlx5e: Properly get the PF number phys port name ndo')
('net/mlx5: Fix multiple updates of steering rules in parallel')
('net/mlx5: Avoid panic when setting vport mac, getting vport config')
('net/mlx5: Avoid panic when setting vport rate')
('net/mlx5e: IPoIB, Fix RX checksum statistics update')
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c6873d18 24319258
@@ -424,6 +424,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 		priv->channels.params = new_channels.params;
+		if (!netif_is_rxfh_configured(priv->netdev))
+			mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
+						      MLX5E_INDIR_RQT_SIZE, count);
 		goto out;
 	}
...
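
For context: the hunk above repopulates the default RSS indirection table when the channel count changes while the interface is down, so no table entry can reference a receive queue that no longer exists. A rough, hypothetical sketch (simplified names, not the driver's exact code) of how such a default table is typically filled round-robin over the active channels:

#include <stdio.h>

#define INDIR_RQT_SIZE 256	/* assumption: mirrors MLX5E_INDIR_RQT_SIZE */

/* Spread indirection-table entries round-robin across the channels so
 * every entry points at a receive queue that actually exists. */
static void build_default_indir_rqt(unsigned int *indir, int table_size,
				    int num_channels)
{
	for (int i = 0; i < table_size; i++)
		indir[i] = i % num_channels;
}

int main(void)
{
	unsigned int indir[INDIR_RQT_SIZE];

	build_default_indir_rqt(indir, INDIR_RQT_SIZE, 4);
	printf("entry 5 -> queue %u\n", indir[5]);	/* entry 5 -> queue 1 */
	return 0;
}
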
@@ -1129,16 +1129,17 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
-	int ret, pf_num;
+	unsigned int fn;
+	int ret;
 
-	ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num);
-	if (ret)
-		return ret;
+	fn = PCI_FUNC(priv->mdev->pdev->devfn);
+	if (fn >= MLX5_MAX_PORTS)
+		return -EOPNOTSUPP;
 
 	if (rep->vport == MLX5_VPORT_UPLINK)
-		ret = snprintf(buf, len, "p%d", pf_num);
+		ret = snprintf(buf, len, "p%d", fn);
 	else
-		ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1);
+		ret = snprintf(buf, len, "pf%dvf%d", fn, rep->vport - 1);
 
 	if (ret >= len)
 		return -EOPNOTSUPP;
...
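
The replacement derives the PF number directly from the PCI devfn encoding instead of walking the LAG device list. The kernel's PCI_FUNC() keeps the low 3 bits of devfn and PCI_SLOT() the upper 5; a standalone sketch of that decoding:

#include <stdio.h>

/* Same decoding as the kernel's PCI_SLOT()/PCI_FUNC() macros: devfn
 * packs a 5-bit device (slot) number and a 3-bit function number. */
#define SLOT(devfn)	(((devfn) >> 3) & 0x1f)
#define FUNC(devfn)	((devfn) & 0x07)

int main(void)
{
	unsigned int devfn = 0x09;	/* device 1, function 1 */

	printf("slot %u func %u\n", SLOT(devfn), FUNC(devfn));
	return 0;
}
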
@@ -1295,8 +1295,14 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 	skb->protocol = *((__be16 *)(skb->data));
 
-	skb->ip_summed = CHECKSUM_COMPLETE;
-	skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+	if (netdev->features & NETIF_F_RXCSUM) {
+		skb->ip_summed = CHECKSUM_COMPLETE;
+		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+		stats->csum_complete++;
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;
+		stats->csum_none++;
+	}
 
 	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
 		skb_hwtstamps(skb)->hwtstamp =
@@ -1315,7 +1321,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 
 	skb->dev = netdev;
 
-	stats->csum_complete++;
 	stats->packets++;
 	stats->bytes += cqe_bcnt;
 }
...
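
With CHECKSUM_COMPLETE the device hands the stack a 16-bit one's-complement sum over the packet, which the stack folds against protocol checksums; the fix only claims that (and bumps csum_complete) when NETIF_F_RXCSUM is actually enabled, otherwise falling back to CHECKSUM_NONE and csum_none. A hedged, self-contained sketch of the kind of sum such hardware reports:

#include <stdint.h>
#include <stdio.h>

/* 16-bit one's-complement sum over the packet bytes, as a
 * CHECKSUM_COMPLETE-capable NIC would report it in the CQE. */
static uint16_t ones_complement_sum(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;

	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)((data[i] << 8) | data[i + 1]);
	if (len & 1)
		sum += (uint32_t)(data[len - 1] << 8);
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	const uint8_t pkt[] = { 0x45, 0x00, 0x00, 0x1c };

	printf("sum = 0x%04x\n", ones_complement_sum(pkt, sizeof(pkt)));
	return 0;
}
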
@@ -1931,7 +1931,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 	u64 node_guid;
 	int err = 0;
 
-	if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
 		return -EINVAL;
@@ -2005,7 +2005,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 {
 	struct mlx5_vport *evport;
 
-	if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport))
 		return -EINVAL;
@@ -2297,19 +2297,24 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
 				u32 max_rate, u32 min_rate)
 {
-	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
-	bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
-					fw_max_bw_share >= MLX5_MIN_BW_SHARE;
-	bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
 	struct mlx5_vport *evport;
+	u32 fw_max_bw_share;
 	u32 previous_min_rate;
 	u32 divider;
+	bool min_rate_supported;
+	bool max_rate_supported;
 	int err = 0;
 
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport))
 		return -EINVAL;
+
+	fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+	min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
+				fw_max_bw_share >= MLX5_MIN_BW_SHARE;
+	max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
+
 	if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
 		return -EOPNOTSUPP;
...
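
All three eswitch hunks fix the same bug class: initializers in the declaration list dereference esw (or read its firmware capabilities) before the body's validity checks run, so a NULL or disallowed eswitch oopses. A minimal sketch of the corrected ordering, using hypothetical stand-in types rather than mlx5 structures:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins (assumption, not mlx5 code). */
struct qos_caps { unsigned int max_tsar_bw_share; };
struct eswitch { struct qos_caps qos; };

/* Guard first, dereference second: a C initializer in the declaration
 * list runs before any check in the body, which is exactly how the
 * original code could fault on a NULL eswitch. */
static int set_vport_rate(struct eswitch *esw, unsigned int min_rate)
{
	unsigned int fw_max_bw_share;

	if (!esw)
		return -EPERM;

	fw_max_bw_share = esw->qos.max_tsar_bw_share;
	if (min_rate && !fw_max_bw_share)
		return -EOPNOTSUPP;
	return 0;
}

int main(void)
{
	printf("%d\n", set_vport_rate(NULL, 1));	/* -EPERM, no crash */
	return 0;
}
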
@@ -263,10 +263,11 @@ static void nested_down_write_ref_node(struct fs_node *node,
 	}
 }
 
-static void down_write_ref_node(struct fs_node *node)
+static void down_write_ref_node(struct fs_node *node, bool locked)
 {
 	if (node) {
-		down_write(&node->lock);
+		if (!locked)
+			down_write(&node->lock);
 		refcount_inc(&node->refcount);
 	}
 }
@@ -277,13 +278,14 @@ static void up_read_ref_node(struct fs_node *node)
 	up_read(&node->lock);
 }
 
-static void up_write_ref_node(struct fs_node *node)
+static void up_write_ref_node(struct fs_node *node, bool locked)
 {
 	refcount_dec(&node->refcount);
-	up_write(&node->lock);
+	if (!locked)
+		up_write(&node->lock);
 }
 
-static void tree_put_node(struct fs_node *node)
+static void tree_put_node(struct fs_node *node, bool locked)
 {
 	struct fs_node *parent_node = node->parent;
 
@@ -294,27 +296,27 @@ static void tree_put_node(struct fs_node *node)
 			/* Only root namespace doesn't have parent and we just
 			 * need to free its node.
 			 */
-			down_write_ref_node(parent_node);
+			down_write_ref_node(parent_node, locked);
 			list_del_init(&node->list);
 			if (node->del_sw_func)
 				node->del_sw_func(node);
-			up_write_ref_node(parent_node);
+			up_write_ref_node(parent_node, locked);
 		} else {
 			kfree(node);
 		}
 		node = NULL;
 	}
 	if (!node && parent_node)
-		tree_put_node(parent_node);
+		tree_put_node(parent_node, locked);
 }
 
-static int tree_remove_node(struct fs_node *node)
+static int tree_remove_node(struct fs_node *node, bool locked)
 {
 	if (refcount_read(&node->refcount) > 1) {
 		refcount_dec(&node->refcount);
 		return -EEXIST;
 	}
-	tree_put_node(node);
+	tree_put_node(node, locked);
 	return 0;
 }
@@ -420,22 +422,34 @@ static void del_sw_flow_table(struct fs_node *node)
 	kfree(ft);
 }
 
-static void del_sw_hw_rule(struct fs_node *node)
+static void modify_fte(struct fs_fte *fte)
 {
 	struct mlx5_flow_root_namespace *root;
-	struct mlx5_flow_rule *rule;
 	struct mlx5_flow_table *ft;
 	struct mlx5_flow_group *fg;
-	struct fs_fte *fte;
-	int modify_mask;
-	struct mlx5_core_dev *dev = get_dev(node);
+	struct mlx5_core_dev *dev;
 	int err;
-	bool update_fte = false;
 
-	fs_get_obj(rule, node);
-	fs_get_obj(fte, rule->node.parent);
 	fs_get_obj(fg, fte->node.parent);
 	fs_get_obj(ft, fg->node.parent);
+	dev = get_dev(&fte->node);
+
+	root = find_root(&ft->node);
+	err = root->cmds->update_fte(dev, ft, fg->id, fte->modify_mask, fte);
+	if (err)
+		mlx5_core_warn(dev,
+			       "%s can't del rule fg id=%d fte_index=%d\n",
+			       __func__, fg->id, fte->index);
+	fte->modify_mask = 0;
+}
+
+static void del_sw_hw_rule(struct fs_node *node)
+{
+	struct mlx5_flow_rule *rule;
+	struct fs_fte *fte;
+
+	fs_get_obj(rule, node);
+	fs_get_obj(fte, rule->node.parent);
 	trace_mlx5_fs_del_rule(rule);
 	if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
 		mutex_lock(&rule->dest_attr.ft->lock);
@@ -445,27 +459,19 @@ static void del_sw_hw_rule(struct fs_node *node)
 
 	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
 	    --fte->dests_size) {
-		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
-			      BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+		fte->modify_mask |=
+			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
 		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
-		update_fte = true;
 		goto out;
 	}
 
 	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
 	    --fte->dests_size) {
-		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
-		update_fte = true;
+		fte->modify_mask |=
+			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
 	}
 out:
-	root = find_root(&ft->node);
-	if (update_fte && fte->dests_size) {
-		err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte);
-		if (err)
-			mlx5_core_warn(dev,
-				       "%s can't del rule fg id=%d fte_index=%d\n",
-				       __func__, fg->id, fte->index);
-	}
 	kfree(rule);
 }
@@ -491,6 +497,7 @@ static void del_hw_fte(struct fs_node *node)
 			mlx5_core_warn(dev,
 				       "flow steering can't delete fte in index %d of flow group id %d\n",
 				       fte->index, fg->id);
+		node->active = 0;
 	}
 }
@@ -591,7 +598,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
 	fte->node.type = FS_TYPE_FLOW_ENTRY;
 	fte->action = *flow_act;
 
-	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
+	tree_init_node(&fte->node, NULL, del_sw_fte);
 
 	return fte;
 }
@@ -858,7 +865,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
 	fs_get_obj(fte, rule->node.parent);
 	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
 		return -EINVAL;
-	down_write_ref_node(&fte->node);
+	down_write_ref_node(&fte->node, false);
 	fs_get_obj(fg, fte->node.parent);
 	fs_get_obj(ft, fg->node.parent);
 
@@ -866,7 +873,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
 	root = find_root(&ft->node);
 	err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
 				     modify_mask, fte);
-	up_write_ref_node(&fte->node);
+	up_write_ref_node(&fte->node, false);
 
 	return err;
 }
@@ -1016,11 +1023,11 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
 	if (err)
 		goto destroy_ft;
 	ft->node.active = true;
-	down_write_ref_node(&fs_prio->node);
+	down_write_ref_node(&fs_prio->node, false);
 	tree_add_node(&ft->node, &fs_prio->node);
 	list_add_flow_table(ft, fs_prio);
 	fs_prio->num_ft++;
-	up_write_ref_node(&fs_prio->node);
+	up_write_ref_node(&fs_prio->node, false);
 	mutex_unlock(&root->chain_lock);
 	trace_mlx5_fs_add_ft(ft);
 	return ft;
@@ -1114,17 +1121,17 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
 	if (ft->autogroup.active)
 		return ERR_PTR(-EPERM);
 
-	down_write_ref_node(&ft->node);
+	down_write_ref_node(&ft->node, false);
 	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
 				     start_index, end_index,
 				     ft->node.children.prev);
-	up_write_ref_node(&ft->node);
+	up_write_ref_node(&ft->node, false);
 	if (IS_ERR(fg))
 		return fg;
 
 	err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
 	if (err) {
-		tree_put_node(&fg->node);
+		tree_put_node(&fg->node, false);
 		return ERR_PTR(err);
 	}
 	trace_mlx5_fs_add_fg(fg);
@@ -1521,10 +1528,10 @@ static void free_match_list(struct match_list_head *head)
 	struct match_list *iter, *match_tmp;
 
 	list_del(&head->first.list);
-	tree_put_node(&head->first.g->node);
+	tree_put_node(&head->first.g->node, false);
 	list_for_each_entry_safe(iter, match_tmp, &head->list,
 				 list) {
-		tree_put_node(&iter->g->node);
+		tree_put_node(&iter->g->node, false);
 		list_del(&iter->list);
 		kfree(iter);
 	}
@@ -1601,11 +1608,16 @@ lookup_fte_locked(struct mlx5_flow_group *g,
 		fte_tmp = NULL;
 		goto out;
 	}
+	if (!fte_tmp->node.active) {
+		tree_put_node(&fte_tmp->node, false);
+		fte_tmp = NULL;
+		goto out;
+	}
 	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
 out:
 	if (take_write)
-		up_write_ref_node(&g->node);
+		up_write_ref_node(&g->node, false);
 	else
 		up_read_ref_node(&g->node);
 	return fte_tmp;
@@ -1647,8 +1659,8 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 			continue;
 		rule = add_rule_fg(g, spec->match_value,
 				   flow_act, dest, dest_num, fte_tmp);
-		up_write_ref_node(&fte_tmp->node);
-		tree_put_node(&fte_tmp->node);
+		up_write_ref_node(&fte_tmp->node, false);
+		tree_put_node(&fte_tmp->node, false);
 		kmem_cache_free(steering->ftes_cache, fte);
 		return rule;
 	}
@@ -1684,7 +1696,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 
 		err = insert_fte(g, fte);
 		if (err) {
-			up_write_ref_node(&g->node);
+			up_write_ref_node(&g->node, false);
 			if (err == -ENOSPC)
 				continue;
 			kmem_cache_free(steering->ftes_cache, fte);
@@ -1692,11 +1704,11 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 		}
 
 		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
-		up_write_ref_node(&g->node);
+		up_write_ref_node(&g->node, false);
 		rule = add_rule_fg(g, spec->match_value,
 				   flow_act, dest, dest_num, fte);
-		up_write_ref_node(&fte->node);
-		tree_put_node(&fte->node);
+		up_write_ref_node(&fte->node, false);
+		tree_put_node(&fte->node, false);
 		return rule;
 	}
 	rule = ERR_PTR(-ENOENT);
@@ -1738,7 +1750,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	err = build_match_list(&match_head, ft, spec);
 	if (err) {
 		if (take_write)
-			up_write_ref_node(&ft->node);
+			up_write_ref_node(&ft->node, false);
 		else
 			up_read_ref_node(&ft->node);
 		return ERR_PTR(err);
@@ -1753,7 +1765,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	if (!IS_ERR(rule) ||
 	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
 		if (take_write)
-			up_write_ref_node(&ft->node);
+			up_write_ref_node(&ft->node, false);
 		return rule;
 	}
 
@@ -1769,12 +1781,12 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	g = alloc_auto_flow_group(ft, spec);
 	if (IS_ERR(g)) {
 		rule = ERR_CAST(g);
-		up_write_ref_node(&ft->node);
+		up_write_ref_node(&ft->node, false);
 		return rule;
 	}
 
 	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
-	up_write_ref_node(&ft->node);
+	up_write_ref_node(&ft->node, false);
 
 	err = create_auto_flow_group(ft, g);
 	if (err)
@@ -1793,17 +1805,17 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	}
 
 	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
-	up_write_ref_node(&g->node);
+	up_write_ref_node(&g->node, false);
 	rule = add_rule_fg(g, spec->match_value, flow_act, dest,
 			   dest_num, fte);
-	up_write_ref_node(&fte->node);
-	tree_put_node(&fte->node);
-	tree_put_node(&g->node);
+	up_write_ref_node(&fte->node, false);
+	tree_put_node(&fte->node, false);
+	tree_put_node(&g->node, false);
 	return rule;
 
 err_release_fg:
-	up_write_ref_node(&g->node);
-	tree_put_node(&g->node);
+	up_write_ref_node(&g->node, false);
+	tree_put_node(&g->node, false);
 	return ERR_PTR(err);
 }
@@ -1866,10 +1878,33 @@ EXPORT_SYMBOL(mlx5_add_flow_rules);
 
 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
 {
+	struct fs_fte *fte;
 	int i;
 
+	/* In order to consolidate the HW changes we lock the FTE for other
+	 * changes, and increase its refcount, in order not to perform the
+	 * "del" functions of the FTE. Will handle them here.
+	 * The removal of the rules is done under locked FTE.
+	 * After removing all the handle's rules, if there are remaining
+	 * rules, it means we just need to modify the FTE in FW, and
+	 * unlock/decrease the refcount we increased before.
+	 * Otherwise, it means the FTE should be deleted. First delete the
+	 * FTE in FW. Then, unlock the FTE, and proceed the tree_put_node of
+	 * the FTE, which will handle the last decrease of the refcount, as
+	 * well as required handling of its parent.
+	 */
+	fs_get_obj(fte, handle->rule[0]->node.parent);
+	down_write_ref_node(&fte->node, false);
 	for (i = handle->num_rules - 1; i >= 0; i--)
-		tree_remove_node(&handle->rule[i]->node);
+		tree_remove_node(&handle->rule[i]->node, true);
+	if (fte->modify_mask && fte->dests_size) {
+		modify_fte(fte);
+		up_write_ref_node(&fte->node, false);
+	} else {
+		del_hw_fte(&fte->node);
+		up_write(&fte->node.lock);
+		tree_put_node(&fte->node, false);
+	}
 	kfree(handle);
 }
 EXPORT_SYMBOL(mlx5_del_flow_rules);
@@ -1972,7 +2007,7 @@ int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
 		mutex_unlock(&root->chain_lock);
 		return err;
 	}
-	if (tree_remove_node(&ft->node))
+	if (tree_remove_node(&ft->node, false))
 		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
 			       ft->id);
 	mutex_unlock(&root->chain_lock);
@@ -1983,7 +2018,7 @@ EXPORT_SYMBOL(mlx5_destroy_flow_table);
 
 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
 {
-	if (tree_remove_node(&fg->node))
+	if (tree_remove_node(&fg->node, false))
 		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
 			       fg->id);
 }
@@ -2367,8 +2402,8 @@ static void clean_tree(struct fs_node *node)
 		tree_get_node(node);
 		list_for_each_entry_safe(iter, temp, &node->children, list)
 			clean_tree(iter);
-		tree_put_node(node);
-		tree_remove_node(node);
+		tree_put_node(node, false);
+		tree_remove_node(node, false);
 	}
 }
...
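
The steering fix threads a `locked` flag through the put/remove helpers so a caller that already holds the FTE's write lock (mlx5_del_flow_rules above) can drop rule references without re-acquiring it, while every existing call site passes false and keeps its old behavior. A generic sketch of the pattern, using pthreads rather than the kernel's rw_semaphore:

#include <pthread.h>
#include <stdbool.h>

/* 'locked' says the caller already holds the write lock, so the helper
 * only takes and releases it when it owns that job itself. */
struct node {
	pthread_rwlock_t lock;
	int refcount;
};

static void put_node(struct node *n, bool locked)
{
	if (!locked)
		pthread_rwlock_wrlock(&n->lock);
	n->refcount--;
	if (!locked)
		pthread_rwlock_unlock(&n->lock);
}

With that in place, mlx5_del_flow_rules() takes the FTE lock once, removes every rule of the handle with locked=true, and issues a single firmware update, instead of one update per deleted rule racing against parallel changes to the same FTE.
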
@@ -172,6 +172,7 @@ struct fs_fte {
 	enum fs_fte_status		status;
 	struct mlx5_fc			*counter;
 	struct rhash_head		hash;
+	int				modify_mask;
 };
 
 /* Type of children is mlx5_flow_table/namespace */
...
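
The new modify_mask field is what lets those per-rule deletions be batched: each del_sw_hw_rule() only ORs its pending-change bits into the FTE, and modify_fte() later flushes the accumulated mask in one firmware command. A toy version of that accumulate-then-flush pattern:

#include <stdio.h>

#define PENDING_ACTION		(1u << 0)
#define PENDING_FLOW_COUNTERS	(1u << 1)
#define PENDING_DEST_LIST	(1u << 2)

struct fte {
	unsigned int modify_mask;	/* pending firmware-update bits */
};

/* Deletions record what changed instead of updating firmware themselves. */
static void note_counter_removed(struct fte *fte)
{
	fte->modify_mask |= PENDING_ACTION | PENDING_FLOW_COUNTERS;
}

static void note_dest_removed(struct fte *fte)
{
	fte->modify_mask |= PENDING_DEST_LIST;
}

/* One flush applies every accumulated change in a single command. */
static void flush_fte(struct fte *fte)
{
	if (fte->modify_mask) {
		printf("update_fte(mask=0x%x)\n", fte->modify_mask);
		fte->modify_mask = 0;
	}
}

int main(void)
{
	struct fte fte = { 0 };

	note_counter_removed(&fte);
	note_dest_removed(&fte);
	flush_fte(&fte);	/* prints: update_fte(mask=0x7) */
	return 0;
}
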
@@ -595,27 +595,6 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
 			 err);
 }
 
-int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num)
-{
-	struct mlx5_lag *ldev;
-	int n;
-
-	ldev = mlx5_lag_dev_get(dev);
-	if (!ldev) {
-		mlx5_core_warn(dev, "no lag device, can't get pf num\n");
-		return -EINVAL;
-	}
-
-	for (n = 0; n < MLX5_MAX_PORTS; n++)
-		if (ldev->pf[n].dev == dev) {
-			*pf_num = n;
-			return 0;
-		}
-
-	mlx5_core_warn(dev, "wasn't able to locate pf in the lag device\n");
-	return -EINVAL;
-}
-
 /* Must be called with intf_mutex held */
 void mlx5_lag_remove(struct mlx5_core_dev *dev)
 {
...
@@ -188,8 +188,6 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
 		    MLX5_CAP_GEN(dev, lag_master);
 }
 
-int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num);
-
 void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
 void mlx5_lag_update(struct mlx5_core_dev *dev);
...