Commit 743edc85 authored by Ido Schimmel, committed by Jakub Kicinski

mlxsw: spectrum_acl_tcam: Fix warning during rehash

As previously explained, the rehash delayed work migrates filters from
one region to another. This is done by iterating over all chunks (all
the filters with the same priority) in the region and in each chunk
iterating over all the filters.

When the work runs out of credits, it stores the current chunk and entry
as markers in the per-work context so that it knows where to resume the
migration the next time the work is scheduled.

Upon error, the chunk marker is reset to NULL, but the entry markers are
not reset, despite being relative to it. This can result in the migration
being resumed from an entry that does not belong to the chunk being
migrated. In turn, this will eventually lead to a chunk being iterated
over as if it were an entry. Because of how the two structures happen to
be defined, this does not lead to KASAN splats, but to warnings such as
[1].

Fix by adding a helper that resets all the markers and calling it from
all the places that currently reset only the chunk marker. For good
measure, also call it when starting a completely new rehash. Add a
warning to catch future cases.

[1]
WARNING: CPU: 7 PID: 1076 at drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c:407 mlxsw_afk_encode+0x242/0x2f0
Modules linked in:
CPU: 7 PID: 1076 Comm: kworker/7:24 Tainted: G        W          6.9.0-rc3-custom-00880-g29e61d91b77b #29
Hardware name: Mellanox Technologies Ltd. MSN3700/VMOD0005, BIOS 5.11 01/06/2019
Workqueue: mlxsw_core mlxsw_sp_acl_tcam_vregion_rehash_work
RIP: 0010:mlxsw_afk_encode+0x242/0x2f0
[...]
Call Trace:
 <TASK>
 mlxsw_sp_acl_atcam_entry_add+0xd9/0x3c0
 mlxsw_sp_acl_tcam_entry_create+0x5e/0xa0
 mlxsw_sp_acl_tcam_vchunk_migrate_all+0x109/0x290
 mlxsw_sp_acl_tcam_vregion_rehash_work+0x6c/0x470
 process_one_work+0x151/0x370
 worker_thread+0x2cb/0x3e0
 kthread+0xd0/0x100
 ret_from_fork+0x34/0x50
 </TASK>

Fixes: 6f9579d4 ("mlxsw: spectrum_acl: Remember where to continue rehash migration")
Signed-off-by: Ido Schimmel <idosch@nvidia.com>
Tested-by: Alexander Zubkov <green@qrator.net>
Reviewed-by: Petr Machata <petrm@nvidia.com>
Signed-off-by: Petr Machata <petrm@nvidia.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://lore.kernel.org/r/cc17eed86b41dd829d39b07906fec074a9ce580e.1713797103.git.petrm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 8ca3f7a7
@@ -730,6 +730,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
 		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
 }
 
+static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+	/* The entry markers are relative to the current chunk and therefore
+	 * needs to be reset together with the chunk marker.
+	 */
+	ctx->current_vchunk = NULL;
+	ctx->start_ventry = NULL;
+	ctx->stop_ventry = NULL;
+}
+
 static void
 mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
 {
@@ -752,7 +763,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
 	 * the current chunk pointer to make sure all chunks
 	 * are properly migrated.
 	 */
-	vregion->rehash.ctx.current_vchunk = NULL;
+	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
 }
 
 static struct mlxsw_sp_acl_tcam_vregion *
@@ -1220,7 +1231,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
 {
 	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
 	vchunk->chunk2 = NULL;
-	ctx->current_vchunk = NULL;
+	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
 }
 
 static int
@@ -1252,6 +1263,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
 		ventry = list_first_entry(&vchunk->ventry_list,
 					  typeof(*ventry), list);
 
+	WARN_ON(ventry->vchunk != vchunk);
+
 	list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
 		/* During rollback, once we reach the ventry that failed
 		 * to migrate, we are done.
@@ -1343,7 +1356,7 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
 		 * to vregion->region.
 		 */
 		swap(vregion->region, vregion->region2);
-		ctx->current_vchunk = NULL;
+		mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
 		ctx->this_is_rollback = true;
 		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
 							    ctx, credits);
@@ -1402,6 +1415,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
 
 	ctx->hints_priv = hints_priv;
 	ctx->this_is_rollback = false;
+	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
 
 	return 0;