Commit c101fffc authored by David S. Miller

Merge tag 'mlx5-fixes-2020-01-06' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2020-01-06

This series introduces some fixes to the mlx5 driver.

Please pull and let me know if there is any problem.

For -stable v5.3
 ('net/mlx5: Move devlink registration before interfaces load')

For -stable v5.4
 ('net/mlx5e: Fix hairpin RSS table size')
 ('net/mlx5: DR, Init lists that are used in rule's member')
 ('net/mlx5e: Always print health reporter message to dmesg')
 ('net/mlx5: DR, No need for atomic refcount for internal SW steering resources')
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d76063c5 df55c558
@@ -122,6 +122,22 @@ enum {
 #endif
 };
 
+#define MLX5E_TTC_NUM_GROUPS	3
+#define MLX5E_TTC_GROUP1_SIZE	(BIT(3) + MLX5E_NUM_TUNNEL_TT)
+#define MLX5E_TTC_GROUP2_SIZE	 BIT(1)
+#define MLX5E_TTC_GROUP3_SIZE	 BIT(0)
+#define MLX5E_TTC_TABLE_SIZE	(MLX5E_TTC_GROUP1_SIZE +\
+				 MLX5E_TTC_GROUP2_SIZE +\
+				 MLX5E_TTC_GROUP3_SIZE)
+
+#define MLX5E_INNER_TTC_NUM_GROUPS	3
+#define MLX5E_INNER_TTC_GROUP1_SIZE	BIT(3)
+#define MLX5E_INNER_TTC_GROUP2_SIZE	BIT(1)
+#define MLX5E_INNER_TTC_GROUP3_SIZE	BIT(0)
+#define MLX5E_INNER_TTC_TABLE_SIZE	(MLX5E_INNER_TTC_GROUP1_SIZE +\
+					 MLX5E_INNER_TTC_GROUP2_SIZE +\
+					 MLX5E_INNER_TTC_GROUP3_SIZE)
+
 #ifdef CONFIG_MLX5_EN_RXNFC
 struct mlx5e_ethtool_table {
......
@@ -197,9 +197,10 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
 			struct devlink_health_reporter *reporter, char *err_str,
 			struct mlx5e_err_ctx *err_ctx)
 {
-	if (!reporter) {
-		netdev_err(priv->netdev, err_str);
+	netdev_err(priv->netdev, err_str);
+
+	if (!reporter)
 		return err_ctx->recover(&err_ctx->ctx);
-	}
+
 	return devlink_health_report(reporter, err_str, err_ctx);
 }
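
For reference, a rough sketch of how mlx5e_health_report() reads after this hunk (assembled from the diff above): the error string is now always printed to dmesg, and the recover callback is only invoked directly when no devlink reporter has been registered.

int mlx5e_health_report(struct mlx5e_priv *priv,
			struct devlink_health_reporter *reporter, char *err_str,
			struct mlx5e_err_ctx *err_ctx)
{
	/* always log, whether or not a devlink reporter exists */
	netdev_err(priv->netdev, err_str);

	if (!reporter)
		return err_ctx->recover(&err_ctx->ctx);

	return devlink_health_report(reporter, err_str, err_ctx);
}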
@@ -904,22 +904,6 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
 	return err;
 }
 
-#define MLX5E_TTC_NUM_GROUPS	3
-#define MLX5E_TTC_GROUP1_SIZE	(BIT(3) + MLX5E_NUM_TUNNEL_TT)
-#define MLX5E_TTC_GROUP2_SIZE	 BIT(1)
-#define MLX5E_TTC_GROUP3_SIZE	 BIT(0)
-#define MLX5E_TTC_TABLE_SIZE	(MLX5E_TTC_GROUP1_SIZE +\
-				 MLX5E_TTC_GROUP2_SIZE +\
-				 MLX5E_TTC_GROUP3_SIZE)
-
-#define MLX5E_INNER_TTC_NUM_GROUPS	3
-#define MLX5E_INNER_TTC_GROUP1_SIZE	BIT(3)
-#define MLX5E_INNER_TTC_GROUP2_SIZE	BIT(1)
-#define MLX5E_INNER_TTC_GROUP3_SIZE	BIT(0)
-#define MLX5E_INNER_TTC_TABLE_SIZE	(MLX5E_INNER_TTC_GROUP1_SIZE +\
-					 MLX5E_INNER_TTC_GROUP2_SIZE +\
-					 MLX5E_INNER_TTC_GROUP3_SIZE)
-
 static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
					  bool use_ipv)
 {
......
@@ -592,7 +592,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
 		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
 
-	ft_attr->max_fte = MLX5E_NUM_TT;
+	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
 	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
 	ft_attr->prio = MLX5E_TC_PRIO;
 }
@@ -2999,6 +2999,25 @@ static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info
 	return kmemdup(tun_info, tun_size, GFP_KERNEL);
 }
 
+static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
+				      struct mlx5e_tc_flow *flow,
+				      int out_index,
+				      struct mlx5e_encap_entry *e,
+				      struct netlink_ext_ack *extack)
+{
+	int i;
+
+	for (i = 0; i < out_index; i++) {
+		if (flow->encaps[i].e != e)
+			continue;
+
+		NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
+		netdev_err(priv->netdev, "can't duplicate encap action\n");
+		return true;
+	}
+
+	return false;
+}
+
 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
@@ -3034,6 +3053,12 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 
 	/* must verify if encap is valid or not */
 	if (e) {
+		/* Check that entry was not already attached to this flow */
+		if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
+			err = -EOPNOTSUPP;
+			goto out_err;
+		}
+
 		mutex_unlock(&esw->offloads.encap_tbl_lock);
 		wait_for_completion(&e->res_ready);
@@ -3220,6 +3245,26 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
 	       same_hw_devs(priv, netdev_priv(out_dev));
 }
 
+static bool is_duplicated_output_device(struct net_device *dev,
+					struct net_device *out_dev,
+					int *ifindexes, int if_count,
+					struct netlink_ext_ack *extack)
+{
+	int i;
+
+	for (i = 0; i < if_count; i++) {
+		if (ifindexes[i] == out_dev->ifindex) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "can't duplicate output to same device");
+			netdev_err(dev, "can't duplicate output to same device: %s\n",
+				   out_dev->name);
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow *flow,
@@ -3231,11 +3276,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	const struct ip_tunnel_info *info = NULL;
+	int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
 	bool ft_flow = mlx5e_is_ft_flow(flow);
 	const struct flow_action_entry *act;
+	int err, i, if_count = 0;
 	bool encap = false;
 	u32 action = 0;
-	int err, i;
 
 	if (!flow_action_has_entries(flow_action))
 		return -EINVAL;
@@ -3312,6 +3358,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
			struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
			struct net_device *uplink_upper;
 
+			if (is_duplicated_output_device(priv->netdev,
+							out_dev,
+							ifindexes,
+							if_count,
+							extack))
+				return -EOPNOTSUPP;
+
+			ifindexes[if_count] = out_dev->ifindex;
+			if_count++;
+
			rcu_read_lock();
			uplink_upper =
				netdev_master_upper_dev_get_rcu(uplink_dev);
......
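Taken together, the two helpers added above let the driver reject TC flows that would attach the same encap entry or redirect to the same output device twice. A rough sketch of the resulting checks, reduced to their control flow (locking and surrounding code elided, see the hunks above):

	/* in mlx5e_attach_encap(), before reusing a cached encap entry */
	if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
		err = -EOPNOTSUPP;
		goto out_err;
	}

	/* in parse_tc_fdb_actions(), for each redirect/mirred output */
	if (is_duplicated_output_device(priv->netdev, out_dev,
					ifindexes, if_count, extack))
		return -EOPNOTSUPP;

	ifindexes[if_count] = out_dev->ifindex;
	if_count++;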
@@ -531,16 +531,9 @@ static void del_hw_fte(struct fs_node *node)
 	}
 }
 
-static void del_sw_fte_rcu(struct rcu_head *head)
-{
-	struct fs_fte *fte = container_of(head, struct fs_fte, rcu);
-	struct mlx5_flow_steering *steering = get_steering(&fte->node);
-
-	kmem_cache_free(steering->ftes_cache, fte);
-}
-
 static void del_sw_fte(struct fs_node *node)
 {
+	struct mlx5_flow_steering *steering = get_steering(node);
 	struct mlx5_flow_group *fg;
 	struct fs_fte *fte;
 	int err;
@@ -553,8 +546,7 @@ static void del_sw_fte(struct fs_node *node)
 			       rhash_fte);
 	WARN_ON(err);
 	ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
-
-	call_rcu(&fte->rcu, del_sw_fte_rcu);
+	kmem_cache_free(steering->ftes_cache, fte);
 }
 
 static void del_hw_flow_group(struct fs_node *node)
@@ -1633,47 +1625,22 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
 }
 
 static struct fs_fte *
-lookup_fte_for_write_locked(struct mlx5_flow_group *g, const u32 *match_value)
+lookup_fte_locked(struct mlx5_flow_group *g,
+		  const u32 *match_value,
+		  bool take_write)
 {
 	struct fs_fte *fte_tmp;
 
-	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
-
-	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte);
-	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
-		fte_tmp = NULL;
-		goto out;
-	}
-
-	if (!fte_tmp->node.active) {
-		tree_put_node(&fte_tmp->node, false);
-		fte_tmp = NULL;
-		goto out;
-	}
-	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-
-out:
-	up_write_ref_node(&g->node, false);
-	return fte_tmp;
-}
-
-static struct fs_fte *
-lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value)
-{
-	struct fs_fte *fte_tmp;
-
-	if (!tree_get_node(&g->node))
-		return NULL;
-
-	rcu_read_lock();
-	fte_tmp = rhashtable_lookup(&g->ftes_hash, match_value, rhash_fte);
+	if (take_write)
+		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+	else
+		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
+	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
+					 rhash_fte);
 	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
-		rcu_read_unlock();
 		fte_tmp = NULL;
 		goto out;
 	}
-	rcu_read_unlock();
-
 	if (!fte_tmp->node.active) {
 		tree_put_node(&fte_tmp->node, false);
 		fte_tmp = NULL;
@@ -1681,19 +1648,12 @@ lookup_fte_for_read_locked(struct mlx5_flow_group *g, const u32 *match_value)
 	}
 
 	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-
 out:
-	tree_put_node(&g->node, false);
-	return fte_tmp;
-}
-
-static struct fs_fte *
-lookup_fte_locked(struct mlx5_flow_group *g, const u32 *match_value, bool write)
-{
-	if (write)
-		return lookup_fte_for_write_locked(g, match_value);
+	if (take_write)
+		up_write_ref_node(&g->node, false);
 	else
-		return lookup_fte_for_read_locked(g, match_value);
+		up_read_ref_node(&g->node);
+	return fte_tmp;
 }
 
 static struct mlx5_flow_handle *
......
@@ -203,7 +203,6 @@ struct fs_fte {
	enum fs_fte_status		status;
	struct mlx5_fc			*counter;
	struct rhash_head		hash;
-	struct rcu_head			rcu;
	int				modify_mask;
 };
......
@@ -1193,6 +1193,12 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
 	if (err)
 		goto err_load;
 
+	if (boot) {
+		err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
+		if (err)
+			goto err_devlink_reg;
+	}
+
 	if (mlx5_device_registered(dev)) {
 		mlx5_attach_device(dev);
 	} else {
@@ -1210,6 +1216,9 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
 	return err;
 
 err_reg_dev:
+	if (boot)
+		mlx5_devlink_unregister(priv_to_devlink(dev));
+err_devlink_reg:
 	mlx5_unload(dev);
 err_load:
 	if (boot)
@@ -1347,10 +1356,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	request_module_nowait(MLX5_IB_MOD);
 
-	err = mlx5_devlink_register(devlink, &pdev->dev);
-	if (err)
-		goto clean_load;
-
 	err = mlx5_crdump_enable(dev);
 	if (err)
 		dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
@@ -1358,9 +1363,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	pci_save_state(pdev);
 	return 0;
 
-clean_load:
-	mlx5_unload_one(dev, true);
-
 err_load_one:
 	mlx5_pci_close(dev);
 pci_init_err:
......
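For context on the -stable v5.3 fix ("Move devlink registration before interfaces load"): with the hunks above, mlx5_load_one() registers devlink on boot before the interfaces are attached or registered, and the new err_devlink_reg label unwinds it; previously init_one() registered devlink only after mlx5_load_one() had already brought the interfaces up. A rough sketch of the resulting flow, assembled from the hunks above with unrelated parts elided:

	/* mlx5_load_one(dev, boot), after mlx5_load() succeeds */
	if (boot) {
		err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
		if (err)
			goto err_devlink_reg;
	}

	if (mlx5_device_registered(dev)) {
		mlx5_attach_device(dev);
	} else {
		/* ... first-time device/interface registration ... */
	}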
@@ -209,7 +209,7 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
 	/* We need to copy the refcount since this ste
	 * may have been traversed several times
	 */
-	refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount));
+	new_ste->refcount = cur_ste->refcount;
 
 	/* Link old STEs rule_mem list to the new ste */
 	mlx5dr_rule_update_rule_member(cur_ste, new_ste);
@@ -638,6 +638,9 @@ static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
 	if (!rule_mem)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&rule_mem->list);
+	INIT_LIST_HEAD(&rule_mem->use_ste_list);
+
 	rule_mem->ste = ste;
 	list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
......
@@ -348,7 +348,7 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
 	if (dst->next_htbl)
 		dst->next_htbl->pointing_ste = dst;
 
-	refcount_set(&dst->refcount, refcount_read(&src->refcount));
+	dst->refcount = src->refcount;
 
 	INIT_LIST_HEAD(&dst->rule_list);
 	list_splice_tail_init(&src->rule_list, &dst->rule_list);
@@ -565,7 +565,7 @@ bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
 
 bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
 {
-	return !refcount_read(&ste->refcount);
+	return !ste->refcount;
 }
 
 /* Init one ste as a pattern for ste data array */
@@ -689,14 +689,14 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
 	htbl->ste_arr = chunk->ste_arr;
 	htbl->hw_ste_arr = chunk->hw_ste_arr;
 	htbl->miss_list = chunk->miss_list;
-	refcount_set(&htbl->refcount, 0);
+	htbl->refcount = 0;
 
 	for (i = 0; i < chunk->num_of_entries; i++) {
 		struct mlx5dr_ste *ste = &htbl->ste_arr[i];
 
 		ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
 		ste->htbl = htbl;
-		refcount_set(&ste->refcount, 0);
+		ste->refcount = 0;
 		INIT_LIST_HEAD(&ste->miss_list_node);
 		INIT_LIST_HEAD(&htbl->miss_list[i]);
 		INIT_LIST_HEAD(&ste->rule_list);
@@ -713,7 +713,7 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
 
 int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
 {
-	if (refcount_read(&htbl->refcount))
+	if (htbl->refcount)
 		return -EBUSY;
 
 	mlx5dr_icm_free_chunk(htbl->chunk);
......
@@ -123,7 +123,7 @@ struct mlx5dr_matcher_rx_tx;
 struct mlx5dr_ste {
	u8 *hw_ste;
	/* refcount: indicates the num of rules that using this ste */
-	refcount_t refcount;
+	u32 refcount;
 
	/* attached to the miss_list head at each htbl entry */
	struct list_head miss_list_node;
@@ -155,7 +155,7 @@ struct mlx5dr_ste_htbl_ctrl {
 struct mlx5dr_ste_htbl {
	u8 lu_type;
	u16 byte_mask;
-	refcount_t refcount;
+	u32 refcount;
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste *ste_arr;
	u8 *hw_ste_arr;
@@ -206,13 +206,14 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
 
 static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
 {
-	if (refcount_dec_and_test(&htbl->refcount))
+	htbl->refcount--;
+	if (!htbl->refcount)
 		mlx5dr_ste_htbl_free(htbl);
 }
 
 static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
 {
-	refcount_inc(&htbl->refcount);
+	htbl->refcount++;
 }
 
 /* STE utils */
@@ -254,14 +255,15 @@ static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
				   struct mlx5dr_matcher *matcher,
				   struct mlx5dr_matcher_rx_tx *nic_matcher)
 {
-	if (refcount_dec_and_test(&ste->refcount))
+	ste->refcount--;
+	if (!ste->refcount)
 		mlx5dr_ste_free(ste, matcher, nic_matcher);
 }
 
 /* initial as 0, increased only when ste appears in a new rule */
 static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
 {
-	refcount_inc(&ste->refcount);
+	ste->refcount++;
 }
 
 void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
......
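For reference, after this series the STE and STE hash-table reference counts in SW steering are plain u32 fields rather than refcount_t; per the fix title these are internal SW steering resources that do not need atomic reference counting (presumably because access to them is serialized by the steering domain locking). The resulting helpers, taken from the hunks above:

static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
{
	ste->refcount++;
}

static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
				  struct mlx5dr_matcher *matcher,
				  struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	ste->refcount--;
	if (!ste->refcount)
		mlx5dr_ste_free(ste, matcher, nic_matcher);
}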