Commit 428168f9 authored by David S. Miller

Merge branch 'mlxsw-trap-adjacency'

Ido Schimmel says:

====================
mlxsw: Alter trap adjacency entry allocation scheme

In commit 0c3cbbf9 ("mlxsw: Add specific trap for packets routed via
invalid nexthops"), mlxsw started allocating a new adjacency entry
during driver initialization, to trap packets routed via invalid
nexthops.

This behavior was later altered in commit 983db619 ("mlxsw:
spectrum_router: Allocate discard adjacency entry when needed") to only
allocate the entry upon the first route that requires it. The motivation
for the change is explained in the commit message.

The problem with the current behavior is that the entry shows up as a
"leak" in a new BPF resource monitoring tool [1]. This is caused by the
asymmetry of the allocation/free scheme. While the entry is allocated
upon the first route that requires it, it is only freed during
de-initialization of the driver.

Instead, this patchset tracks the number of active nexthop groups and
allocates the adjacency entry upon the creation of the first group. The
entry is freed when the number of active groups reaches zero.
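
To make the scheme concrete, here is a minimal, self-contained sketch of the allocate-on-first-user / free-on-last-user pattern the patchset builds on the kernel's refcount_t API. This is not the driver code itself; the demo_-prefixed struct and helper names are illustrative only, and the sketch assumes callers are serialized (the driver holds its router lock for this):

/* Minimal sketch, not the mlxsw implementation. demo_* names are
 * illustrative. Callers are assumed to be serialized by a lock.
 */
#include <linux/refcount.h>
#include <linux/types.h>

struct demo_router {
	refcount_t num_groups;		/* number of active nexthop groups */
	u32 shared_entry_index;		/* index of the shared entry */
};

static int demo_shared_entry_alloc(struct demo_router *router)
{
	/* Stand-in for allocating and programming the shared entry. */
	router->shared_entry_index = 0;
	return 0;
}

static void demo_shared_entry_free(struct demo_router *router)
{
	/* Stand-in for releasing the shared entry. */
}

static int demo_group_inc(struct demo_router *router)
{
	int err;

	/* Another group already holds the entry: just take a reference. */
	if (refcount_inc_not_zero(&router->num_groups))
		return 0;

	/* First group: allocate the shared entry, then publish the count. */
	err = demo_shared_entry_alloc(router);
	if (err)
		return err;

	refcount_set(&router->num_groups, 1);
	return 0;
}

static void demo_group_dec(struct demo_router *router)
{
	/* Last group is gone: release the shared entry. */
	if (!refcount_dec_and_test(&router->num_groups))
		return;

	demo_shared_entry_free(router);
}

The helpers added by patch #2, mlxsw_sp_nexthop_group_inc() and mlxsw_sp_nexthop_group_dec(), follow the same structure, with the shared resource being the trap adjacency entry allocated via mlxsw_sp_kvdl_alloc().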

Patch #1 adds the new entry.

Patch #2 converts mlxsw to start using the new entry and removes the old
one.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8bea96ef e3a3aae7
@@ -4376,6 +4376,66 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
	}
}

static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_ratr_trap_action trap_action;
	char ratr_pl[MLXSW_REG_RATR_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
				  &mlxsw_sp->router->adj_trap_index);
	if (err)
		return err;

	trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
			    MLXSW_REG_RATR_TYPE_ETHERNET,
			    mlxsw_sp->router->adj_trap_index,
			    mlxsw_sp->router->lb_rif_index);
	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
	mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
	if (err)
		goto err_ratr_write;

	return 0;

err_ratr_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   mlxsw_sp->router->adj_trap_index);
	return err;
}

static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   mlxsw_sp->router->adj_trap_index);
}

static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
		return 0;

	err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
	if (err)
		return err;

	refcount_set(&mlxsw_sp->router->num_groups, 1);

	return 0;
}

static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
{
	if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
		return;

	mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
}

static void
mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_nexthop_group *nh_grp,
@@ -4790,6 +4850,9 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
		if (err)
			goto err_nexthop_obj_init;
	}
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err) {
		NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
@@ -4808,6 +4871,8 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
	i = nhgi->count;
err_nexthop_obj_init:
	for (i--; i >= 0; i--) {
@@ -4832,6 +4897,7 @@ mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
		cancel_delayed_work(&router->nh_grp_activity_dw);
	}
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
@@ -5223,6 +5289,9 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
		if (err)
			goto err_nexthop4_init;
	}
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err)
		goto err_group_refresh;
@@ -5230,6 +5299,8 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
	i = nhgi->count;
err_nexthop4_init:
	for (i--; i >= 0; i--) {
@@ -5247,6 +5318,7 @@ mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	int i;

	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
@@ -5725,41 +5797,6 @@ static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
	return err;
}
static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_ratr_trap_action trap_action;
	char ratr_pl[MLXSW_REG_RATR_LEN];
	int err;

	if (mlxsw_sp->router->adj_discard_index_valid)
		return 0;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
				  &mlxsw_sp->router->adj_discard_index);
	if (err)
		return err;

	trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
			    MLXSW_REG_RATR_TYPE_ETHERNET,
			    mlxsw_sp->router->adj_discard_index,
			    mlxsw_sp->router->lb_rif_index);
	mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
	mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
	if (err)
		goto err_ratr_write;

	mlxsw_sp->router->adj_discard_index_valid = true;
	return 0;

err_ratr_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   mlxsw_sp->router->adj_discard_index);
	return err;
}
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
					struct mlxsw_sp_fib_entry *fib_entry,
@@ -5772,7 +5809,6 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;
	int err;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
@@ -5783,11 +5819,8 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
		adjacency_index = nhgi->adj_index;
		ecmp_size = nhgi->ecmp_size;
	} else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
		err = mlxsw_sp_adj_discard_write(mlxsw_sp);
		if (err)
			return err;
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = mlxsw_sp->router->adj_discard_index;
		adjacency_index = mlxsw_sp->router->adj_trap_index;
		ecmp_size = 1;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
@@ -6641,6 +6674,9 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
	}
	nh_grp->nhgi = nhgi;
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err)
		goto err_group_refresh;
@@ -6648,6 +6684,8 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
	i = nhgi->count;
err_nexthop6_init:
	for (i--; i >= 0; i--) {
@@ -6665,6 +6703,7 @@ mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	int i;

	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
@@ -7340,16 +7379,6 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	}

	/* After flushing all the routes, it is not possible anyone is still
	 * using the adjacency index that is discarding packets, so free it in
	 * case it was allocated.
	 */
	if (!mlxsw_sp->router->adj_discard_index_valid)
		return;
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   mlxsw_sp->router->adj_discard_index);
	mlxsw_sp->router->adj_discard_index_valid = false;
}

struct mlxsw_sp_fib6_event {
...
@@ -65,8 +65,6 @@ struct mlxsw_sp_router {
	struct notifier_block inet6addr_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
	const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
	u32 adj_discard_index;
	bool adj_discard_index_valid;
	struct mlxsw_sp_router_nve_decap nve_decap_config;
	struct mutex lock; /* Protects shared router resources */
	struct work_struct fib_event_work;
@@ -82,6 +80,8 @@ struct mlxsw_sp_router {
	struct delayed_work nh_grp_activity_dw;
	struct list_head nh_res_grp_list;
	bool inc_parsing_depth;
	refcount_t num_groups;
	u32 adj_trap_index;
};

struct mlxsw_sp_fib_entry_priv {
...