Commit ed03a418 authored by Alex Vesker, committed by Saeed Mahameed

net/mlx5: DR, Split RX and TX lock for parallel insertion

Change the locking flow to use separate RX and TX locks. Splitting
the single lock into two allows rules to be inserted in parallel
for the RX and TX parts of the FDB.

Locking the dr_domain is done by taking both the RX and the TX
nic_domain locks; this is used mostly for control operations on the
dr_domain. When inserting rules for RX or TX, only the single
nic_domain RX or TX lock is taken. Splitting the lock is safe since
the RX and TX domains are logically separated from each other, and
shared objects such as the send-ring and memory pool are protected
by their own locks.
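
To illustrate the intent, here is a minimal sketch of how the two
paths are expected to use the new helpers (the dr_example_* wrappers
are hypothetical; the lock helpers are the ones added by this patch):

/* Control path: serialize the whole domain. */
static void dr_example_control_op(struct mlx5dr_domain *dmn)
{
	mlx5dr_domain_lock(dmn);	/* takes info.rx, then info.tx */
	/* ... domain-wide changes: table/matcher init, set peer, ... */
	mlx5dr_domain_unlock(dmn);	/* releases info.tx, then info.rx */
}

/* Rule insert/remove path: serialize only one direction. */
static void dr_example_rule_op(struct mlx5dr_domain_rx_tx *nic_dmn)
{
	mlx5dr_domain_nic_lock(nic_dmn);	/* RX *or* TX half only */
	/* ... build STEs and insert/remove the rule ... */
	mlx5dr_domain_nic_unlock(nic_dmn);
}

Since mlx5dr_domain_lock() always takes the RX lock before the TX
lock, full-domain callers acquire the two mutexes in a consistent
order and cannot deadlock against each other or against
per-direction users.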
Signed-off-by: Alex Vesker <valex@mellanox.com>
Reviewed-by: Mark Bloch <markb@mellanox.com>
Reviewed-by: Erez Shitrit <erezsh@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent cedb2819
@@ -297,7 +297,8 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
dmn->mdev = mdev;
dmn->type = type;
refcount_set(&dmn->refcount, 1);
mutex_init(&dmn->mutex);
mutex_init(&dmn->info.rx.mutex);
mutex_init(&dmn->info.tx.mutex);
if (dr_domain_caps_init(mdev, dmn)) {
mlx5dr_err(dmn, "Failed init domain, no caps\n");
@@ -345,9 +346,9 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
int ret = 0;
if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
mutex_lock(&dmn->mutex);
mlx5dr_domain_lock(dmn);
ret = mlx5dr_send_ring_force_drain(dmn);
mutex_unlock(&dmn->mutex);
mlx5dr_domain_unlock(dmn);
if (ret) {
mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
flags, ret);
@@ -371,7 +372,8 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
dr_domain_uninit_cache(dmn);
dr_domain_uninit_resources(dmn);
dr_domain_caps_uninit(dmn);
mutex_destroy(&dmn->mutex);
mutex_destroy(&dmn->info.tx.mutex);
mutex_destroy(&dmn->info.rx.mutex);
kfree(dmn);
return 0;
}
@@ -379,7 +381,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
struct mlx5dr_domain *peer_dmn)
{
mutex_lock(&dmn->mutex);
mlx5dr_domain_lock(dmn);
if (dmn->peer_dmn)
refcount_dec(&dmn->peer_dmn->refcount);
@@ -389,5 +391,5 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
if (dmn->peer_dmn)
refcount_inc(&dmn->peer_dmn->refcount);
mutex_unlock(&dmn->mutex);
mlx5dr_domain_unlock(dmn);
}
@@ -690,7 +690,7 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl,
refcount_set(&matcher->refcount, 1);
INIT_LIST_HEAD(&matcher->matcher_list);
mutex_lock(&tbl->dmn->mutex);
mlx5dr_domain_lock(tbl->dmn);
ret = dr_matcher_init(matcher, mask);
if (ret)
@@ -700,14 +700,14 @@ mlx5dr_matcher_create(struct mlx5dr_table *tbl,
if (ret)
goto matcher_uninit;
mutex_unlock(&tbl->dmn->mutex);
mlx5dr_domain_unlock(tbl->dmn);
return matcher;
matcher_uninit:
dr_matcher_uninit(matcher);
free_matcher:
mutex_unlock(&tbl->dmn->mutex);
mlx5dr_domain_unlock(tbl->dmn);
kfree(matcher);
dec_ref:
refcount_dec(&tbl->refcount);
@@ -791,13 +791,13 @@ int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher)
if (refcount_read(&matcher->refcount) > 1)
return -EBUSY;
mutex_lock(&tbl->dmn->mutex);
mlx5dr_domain_lock(tbl->dmn);
dr_matcher_remove_from_tbl(matcher);
dr_matcher_uninit(matcher);
refcount_dec(&matcher->tbl->refcount);
mutex_unlock(&tbl->dmn->mutex);
mlx5dr_domain_unlock(tbl->dmn);
kfree(matcher);
return 0;
@@ -938,7 +938,10 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
struct mlx5dr_rule_rx_tx *nic_rule)
{
mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
dr_rule_clean_rule_members(rule, nic_rule);
mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
return 0;
}
@@ -1039,18 +1042,18 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param))
return 0;
hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
if (!hw_ste_arr)
return -ENOMEM;
mlx5dr_domain_nic_lock(nic_dmn);
ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher,
dr_rule_get_ipv(&param->outer),
dr_rule_get_ipv(&param->inner));
if (ret)
goto out_err;
hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
if (!hw_ste_arr) {
ret = -ENOMEM;
goto out_err;
}
goto free_hw_ste;
/* Set the tag values inside the ste array */
ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
@@ -1115,6 +1118,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
if (htbl)
mlx5dr_htbl_put(htbl);
mlx5dr_domain_nic_unlock(nic_dmn);
kfree(hw_ste_arr);
return 0;
@@ -1129,8 +1134,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
kfree(ste_info);
}
free_hw_ste:
mlx5dr_domain_nic_unlock(nic_dmn);
kfree(hw_ste_arr);
out_err:
return ret;
}
@@ -1232,31 +1237,23 @@ struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_rule *rule;
mutex_lock(&matcher->tbl->dmn->mutex);
refcount_inc(&matcher->refcount);
rule = dr_rule_create_rule(matcher, value, num_actions, actions);
if (!rule)
refcount_dec(&matcher->refcount);
mutex_unlock(&matcher->tbl->dmn->mutex);
return rule;
}
int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
{
struct mlx5dr_matcher *matcher = rule->matcher;
struct mlx5dr_table *tbl = rule->matcher->tbl;
int ret;
mutex_lock(&tbl->dmn->mutex);
ret = dr_rule_destroy_rule(rule);
mutex_unlock(&tbl->dmn->mutex);
if (!ret)
refcount_dec(&matcher->refcount);
return ret;
}
@@ -14,7 +14,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
if (action && action->action_type != DR_ACTION_TYP_FT)
return -EOPNOTSUPP;
mutex_lock(&tbl->dmn->mutex);
mlx5dr_domain_lock(tbl->dmn);
if (!list_empty(&tbl->matcher_list))
last_matcher = list_last_entry(&tbl->matcher_list,
@@ -78,7 +78,7 @@ int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
refcount_inc(&action->refcount);
out:
mutex_unlock(&tbl->dmn->mutex);
mlx5dr_domain_unlock(tbl->dmn);
return ret;
}
@@ -95,7 +95,7 @@ static void dr_table_uninit_fdb(struct mlx5dr_table *tbl)
static void dr_table_uninit(struct mlx5dr_table *tbl)
{
mutex_lock(&tbl->dmn->mutex);
mlx5dr_domain_lock(tbl->dmn);
switch (tbl->dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
@@ -112,7 +112,7 @@ static void dr_table_uninit(struct mlx5dr_table *tbl)
break;
}
mutex_unlock(&tbl->dmn->mutex);
mlx5dr_domain_unlock(tbl->dmn);
}
static int dr_table_init_nic(struct mlx5dr_domain *dmn,
@@ -177,7 +177,7 @@ static int dr_table_init(struct mlx5dr_table *tbl)
INIT_LIST_HEAD(&tbl->matcher_list);
mutex_lock(&tbl->dmn->mutex);
mlx5dr_domain_lock(tbl->dmn);
switch (tbl->dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
@@ -201,7 +201,7 @@ static int dr_table_init(struct mlx5dr_table *tbl)
break;
}
mutex_unlock(&tbl->dmn->mutex);
mlx5dr_domain_unlock(tbl->dmn);
return ret;
}
@@ -636,6 +636,7 @@ struct mlx5dr_domain_rx_tx {
u64 drop_icm_addr;
u64 default_icm_addr;
enum mlx5dr_ste_entry_type ste_type;
struct mutex mutex; /* protect rx/tx domain */
};
struct mlx5dr_domain_info {
@@ -660,7 +661,6 @@ struct mlx5dr_domain {
struct mlx5_uars_page *uar;
enum mlx5dr_domain_type type;
refcount_t refcount;
struct mutex mutex; /* protect domain */
struct mlx5dr_icm_pool *ste_icm_pool;
struct mlx5dr_icm_pool *action_icm_pool;
struct mlx5dr_send_ring *send_ring;
@@ -814,6 +814,28 @@ struct mlx5dr_icm_chunk {
struct list_head *miss_list;
};
static inline void mlx5dr_domain_nic_lock(struct mlx5dr_domain_rx_tx *nic_dmn)
{
mutex_lock(&nic_dmn->mutex);
}
static inline void mlx5dr_domain_nic_unlock(struct mlx5dr_domain_rx_tx *nic_dmn)
{
mutex_unlock(&nic_dmn->mutex);
}
static inline void mlx5dr_domain_lock(struct mlx5dr_domain *dmn)
{
mlx5dr_domain_nic_lock(&dmn->info.rx);
mlx5dr_domain_nic_lock(&dmn->info.tx);
}
static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn)
{
mlx5dr_domain_nic_unlock(&dmn->info.tx);
mlx5dr_domain_nic_unlock(&dmn->info.rx);
}
static inline int
mlx5dr_matcher_supp_flex_parser_icmp_v4(struct mlx5dr_cmd_caps *caps)
{