Commit 6b861682 authored by Jiri Pirko, committed by David S. Miller

mlxsw: spectrum_acl: Enable vregion rehash per-profile

For the MR ACL profile it does not make sense to do periodical rehashes, as
there is only one mask in use during the whole vregion lifetime.
Therefore periodical work is scheduled but the rehash never happens.
So allow to enable/disable rehash for the whole group, which is added
per-profile. Disable rehashing for MR profile.

Addition to the vregion list is done only in case the rehash is enabled
on the particular vregion. Also, the addition is moved after delayed
work init to avoid schedule of uninitialized work
from vregion_rehash_intrvl_set(). Symmetrically, deletion from
the list is done before canceling the delayed work so it is
not scheduled by vregion_rehash_intrvl_set() again.
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 65e19035
...@@ -175,6 +175,7 @@ struct mlxsw_sp_acl_tcam_vgroup { ...@@ -175,6 +175,7 @@ struct mlxsw_sp_acl_tcam_vgroup {
unsigned int patterns_count; unsigned int patterns_count;
bool tmplt_elusage_set; bool tmplt_elusage_set;
struct mlxsw_afk_element_usage tmplt_elusage; struct mlxsw_afk_element_usage tmplt_elusage;
bool vregion_rehash_enabled;
}; };
struct mlxsw_sp_acl_tcam_vregion { struct mlxsw_sp_acl_tcam_vregion {
...@@ -188,6 +189,7 @@ struct mlxsw_sp_acl_tcam_vregion { ...@@ -188,6 +189,7 @@ struct mlxsw_sp_acl_tcam_vregion {
struct list_head vchunk_list; /* List of vchunks under this vregion */ struct list_head vchunk_list; /* List of vchunks under this vregion */
struct mlxsw_afk_key_info *key_info; struct mlxsw_afk_key_info *key_info;
struct mlxsw_sp_acl_tcam *tcam; struct mlxsw_sp_acl_tcam *tcam;
struct mlxsw_sp_acl_tcam_vgroup *vgroup;
struct delayed_work rehash_dw; struct delayed_work rehash_dw;
struct mlxsw_sp *mlxsw_sp; struct mlxsw_sp *mlxsw_sp;
bool failed_rollback; /* Indicates failed rollback during migration */ bool failed_rollback; /* Indicates failed rollback during migration */
...@@ -290,12 +292,15 @@ mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp, ...@@ -290,12 +292,15 @@ mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vgroup *vgroup, struct mlxsw_sp_acl_tcam_vgroup *vgroup,
const struct mlxsw_sp_acl_tcam_pattern *patterns, const struct mlxsw_sp_acl_tcam_pattern *patterns,
unsigned int patterns_count, unsigned int patterns_count,
struct mlxsw_afk_element_usage *tmplt_elusage) struct mlxsw_afk_element_usage *tmplt_elusage,
bool vregion_rehash_enabled)
{ {
int err; int err;
vgroup->patterns = patterns; vgroup->patterns = patterns;
vgroup->patterns_count = patterns_count; vgroup->patterns_count = patterns_count;
vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
if (tmplt_elusage) { if (tmplt_elusage) {
vgroup->tmplt_elusage_set = true; vgroup->tmplt_elusage_set = true;
memcpy(&vgroup->tmplt_elusage, tmplt_elusage, memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
...@@ -753,6 +758,7 @@ mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp, ...@@ -753,6 +758,7 @@ mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
mutex_init(&vregion->lock); mutex_init(&vregion->lock);
vregion->tcam = tcam; vregion->tcam = tcam;
vregion->mlxsw_sp = mlxsw_sp; vregion->mlxsw_sp = mlxsw_sp;
vregion->vgroup = vgroup;
vregion->ref_count = 1; vregion->ref_count = 1;
vregion->key_info = mlxsw_afk_key_info_get(afk, elusage); vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
...@@ -773,13 +779,12 @@ mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp, ...@@ -773,13 +779,12 @@ mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
if (err) if (err)
goto err_vgroup_vregion_attach; goto err_vgroup_vregion_attach;
list_add_tail(&vregion->tlist, &tcam->vregion_list); if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
if (ops->region_rehash_hints_get) {
/* Create the delayed work for vregion periodic rehash */ /* Create the delayed work for vregion periodic rehash */
INIT_DELAYED_WORK(&vregion->rehash_dw, INIT_DELAYED_WORK(&vregion->rehash_dw,
mlxsw_sp_acl_tcam_vregion_rehash_work); mlxsw_sp_acl_tcam_vregion_rehash_work);
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion); mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
list_add_tail(&vregion->tlist, &tcam->vregion_list);
} }
return vregion; return vregion;
...@@ -798,10 +803,12 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp, ...@@ -798,10 +803,12 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vregion *vregion) struct mlxsw_sp_acl_tcam_vregion *vregion)
{ {
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
if (ops->region_rehash_hints_get) if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
list_del(&vregion->tlist);
cancel_delayed_work_sync(&vregion->rehash_dw); cancel_delayed_work_sync(&vregion->rehash_dw);
list_del(&vregion->tlist); }
mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion); mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
if (vregion->region2) if (vregion->region2)
mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2); mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
...@@ -1410,7 +1417,7 @@ mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp, ...@@ -1410,7 +1417,7 @@ mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup, return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
mlxsw_sp_acl_tcam_patterns, mlxsw_sp_acl_tcam_patterns,
MLXSW_SP_ACL_TCAM_PATTERNS_COUNT, MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
tmplt_elusage); tmplt_elusage, true);
} }
static void static void
...@@ -1527,7 +1534,7 @@ mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp, ...@@ -1527,7 +1534,7 @@ mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup, err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
mlxsw_sp_acl_tcam_patterns, mlxsw_sp_acl_tcam_patterns,
MLXSW_SP_ACL_TCAM_PATTERNS_COUNT, MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
tmplt_elusage); tmplt_elusage, false);
if (err) if (err)
return err; return err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment