Commit e5b2bc30 authored by Yevgeny Kliteynik, committed by Saeed Mahameed

net/mlx5: DR, Cache STE shadow memory

During rule insertion, for each ICM memory chunk we also allocate shadow
memory used for management. This includes the hw_ste, dr_ste and miss list
per entry.
Since the scale of these allocations is large, we noticed a performance
hiccup once malloc and free are stressed.
In extreme use cases, when ~1M chunks are freed at once, this can take up
to 40 seconds, to the point where the kernel reports a self-detected stall
on CPU:

 rcu: INFO: rcu_sched self-detected stall on CPU

To resolve this, increase the reuse of shadow memory by preallocating it
per buddy and reusing it across chunks.
With this change, the time for the aforementioned use case dropped from
~40 seconds to ~8-10 seconds.

Fixes: 29cf8feb ("net/mlx5: DR, ICM pool memory allocator")
Signed-off-by: Alex Vesker <valex@nvidia.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent f908a35b
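Before the diff itself, here is a minimal, compilable userspace sketch of the
scheme the commit adopts: preallocate the shadow arrays once per buddy and hand
each chunk a slice. All names and types below are simplified stand-ins for
illustration (the kernel code uses struct mlx5dr_icm_buddy_mem, struct
mlx5dr_icm_chunk and kvcalloc/kvfree), and the sizes assume DR_STE_SIZE = 64
and DR_STE_SIZE_REDUCED = 48 as defined in dr_types.h.

/* Sketch only: simplified stand-in types, not the kernel's mlx5dr structs. */
#include <stdlib.h>
#include <string.h>

#define DR_STE_SIZE         64  /* one STE slot in ICM (assumed) */
#define DR_STE_SIZE_REDUCED 48  /* shadow bytes kept per STE (assumed) */

struct list_head { struct list_head *next, *prev; };
struct ste { int refcount; };            /* stand-in for struct mlx5dr_ste */

struct buddy {                           /* stand-in for mlx5dr_icm_buddy_mem */
	size_t max_entries;              /* entries in the largest chunk */
	struct ste *ste_arr;
	struct list_head *miss_list;
	unsigned char *hw_ste_arr;
};

struct chunk {                           /* stand-in for mlx5dr_icm_chunk */
	struct buddy *buddy_mem;
	size_t num_of_entries;
	struct ste *ste_arr;
	struct list_head *miss_list;
	unsigned char *hw_ste_arr;
};

/* One allocation per buddy, sized for the largest possible chunk, instead
 * of three allocations per chunk (the old scheme this patch removes).
 */
int buddy_init_ste_cache(struct buddy *b)
{
	b->ste_arr = calloc(b->max_entries, sizeof(*b->ste_arr));
	b->hw_ste_arr = calloc(b->max_entries, DR_STE_SIZE_REDUCED);
	b->miss_list = calloc(b->max_entries, sizeof(*b->miss_list));
	if (!b->ste_arr || !b->hw_ste_arr || !b->miss_list) {
		free(b->ste_arr);
		free(b->hw_ste_arr);
		free(b->miss_list);
		return -1;
	}
	return 0;
}

/* Each chunk gets a slice of the buddy's arrays, located by its byte
 * offset within the buddy's ICM memory.
 */
void chunk_ste_init(struct chunk *c, size_t offset)
{
	struct buddy *b = c->buddy_mem;
	size_t index = offset / DR_STE_SIZE;

	c->ste_arr = &b->ste_arr[index];
	c->miss_list = &b->miss_list[index];
	c->hw_ste_arr = b->hw_ste_arr + index * DR_STE_SIZE_REDUCED;
}

/* Releasing a chunk just zeroes its slice for reuse; no free() calls,
 * which is where the ~40s to ~8-10s improvement comes from.
 */
void chunk_ste_cleanup(struct chunk *c)
{
	memset(c->hw_ste_arr, 0, c->num_of_entries * DR_STE_SIZE_REDUCED);
	memset(c->ste_arr, 0, c->num_of_entries * sizeof(*c->ste_arr));
}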
@@ -136,37 +136,35 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
 	kvfree(icm_mr);
 }
 
-static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
+static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
 {
-	chunk->ste_arr = kvzalloc(chunk->num_of_entries *
-				  sizeof(chunk->ste_arr[0]), GFP_KERNEL);
-	if (!chunk->ste_arr)
-		return -ENOMEM;
-
-	chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries *
-				     DR_STE_SIZE_REDUCED, GFP_KERNEL);
-	if (!chunk->hw_ste_arr)
-		goto out_free_ste_arr;
-
-	chunk->miss_list = kvmalloc(chunk->num_of_entries *
-				    sizeof(chunk->miss_list[0]), GFP_KERNEL);
-	if (!chunk->miss_list)
-		goto out_free_hw_ste_arr;
-
-	return 0;
-
-out_free_hw_ste_arr:
-	kvfree(chunk->hw_ste_arr);
-out_free_ste_arr:
-	kvfree(chunk->ste_arr);
-	return -ENOMEM;
+	/* We support only one type of STE size, both for ConnectX-5 and later
+	 * devices. Once the support for match STE which has a larger tag is
+	 * added (32B instead of 16B), the STE size for devices later than
+	 * ConnectX-5 needs to account for that.
+	 */
+	return DR_STE_SIZE_REDUCED;
+}
+
+static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
+{
+	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+	int index = offset / DR_STE_SIZE;
+
+	chunk->ste_arr = &buddy->ste_arr[index];
+	chunk->miss_list = &buddy->miss_list[index];
+	chunk->hw_ste_arr = buddy->hw_ste_arr +
+			    index * dr_icm_buddy_get_ste_size(buddy);
 }
 
 static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
 {
-	kvfree(chunk->miss_list);
-	kvfree(chunk->hw_ste_arr);
-	kvfree(chunk->ste_arr);
+	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+
+	memset(chunk->hw_ste_arr, 0,
+	       chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy));
+	memset(chunk->ste_arr, 0,
+	       chunk->num_of_entries * sizeof(chunk->ste_arr[0]));
 }
 
 static enum mlx5dr_icm_type
@@ -189,6 +187,44 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
 	kvfree(chunk);
 }
 
+static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+	int num_of_entries =
+		mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);
+
+	buddy->ste_arr = kvcalloc(num_of_entries,
+				  sizeof(struct mlx5dr_ste), GFP_KERNEL);
+	if (!buddy->ste_arr)
+		return -ENOMEM;
+
+	/* Preallocate full STE size on non-ConnectX-5 devices since
+	 * we need to support both full and reduced with the same cache.
+	 */
+	buddy->hw_ste_arr = kvcalloc(num_of_entries,
+				     dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL);
+	if (!buddy->hw_ste_arr)
+		goto free_ste_arr;
+
+	buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL);
+	if (!buddy->miss_list)
+		goto free_hw_ste_arr;
+
+	return 0;
+
+free_hw_ste_arr:
+	kvfree(buddy->hw_ste_arr);
+free_ste_arr:
+	kvfree(buddy->ste_arr);
+	return -ENOMEM;
+}
+
+static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+	kvfree(buddy->ste_arr);
+	kvfree(buddy->hw_ste_arr);
+	kvfree(buddy->miss_list);
+}
+
 static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
 {
 	struct mlx5dr_icm_buddy_mem *buddy;
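The cache is sized for the largest chunk the pool can hand out, so any chunk
carved from this buddy fits inside it. Assuming
mlx5dr_icm_pool_chunk_size_to_entries() returns 1 << chunk_size and taking a
hypothetical max_log_chunk_sz of 20 (both assumptions for illustration, not
values from this patch), a rough per-buddy footprint:

/* Back-of-envelope footprint of the per-buddy cache; all numbers assumed. */
#include <stdio.h>

int main(void)
{
	unsigned long entries = 1UL << 20;          /* 2^max_log_chunk_sz */
	unsigned long hw_ste_bytes = entries * 48;  /* DR_STE_SIZE_REDUCED */
	unsigned long miss_bytes = entries * 2 * sizeof(void *); /* list_head */

	/* ste_arr adds entries * sizeof(struct mlx5dr_ste) on top of this */
	printf("hw_ste_arr: %lu MiB, miss_list: %lu MiB\n",
	       hw_ste_bytes >> 20, miss_bytes >> 20);
	return 0;
}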
@@ -208,11 +244,19 @@ static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
 	buddy->icm_mr = icm_mr;
 	buddy->pool = pool;
 
+	if (pool->icm_type == DR_ICM_TYPE_STE) {
+		/* Reduce allocations by preallocating and reusing the STE structures */
+		if (dr_icm_buddy_init_ste_cache(buddy))
+			goto err_cleanup_buddy;
+	}
+
 	/* add it to the -start- of the list in order to search in it first */
 	list_add(&buddy->list_node, &pool->buddy_mem_list);
 
 	return 0;
 
+err_cleanup_buddy:
+	mlx5dr_buddy_cleanup(buddy);
 err_free_buddy:
 	kvfree(buddy);
 free_mr:
@@ -234,6 +278,9 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
 	mlx5dr_buddy_cleanup(buddy);
 
+	if (buddy->pool->icm_type == DR_ICM_TYPE_STE)
+		dr_icm_buddy_cleanup_ste_cache(buddy);
+
 	kvfree(buddy);
 }
@@ -261,26 +308,18 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
 	chunk->byte_size =
 		mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
 	chunk->seg = seg;
+	chunk->buddy_mem = buddy_mem_pool;
 
-	if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) {
-		mlx5dr_err(pool->dmn,
-			   "Failed to init ste arrays (order: %d)\n",
-			   chunk_size);
-		goto out_free_chunk;
-	}
+	if (pool->icm_type == DR_ICM_TYPE_STE)
+		dr_icm_chunk_ste_init(chunk, offset);
 
 	buddy_mem_pool->used_memory += chunk->byte_size;
-	chunk->buddy_mem = buddy_mem_pool;
 
 	INIT_LIST_HEAD(&chunk->chunk_list);
 
 	/* chunk now is part of the used_list */
 	list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);
 
 	return chunk;
 
-out_free_chunk:
-	kvfree(chunk);
-	return NULL;
 }
 
 static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
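One detail worth calling out in the hunk above: dr_icm_chunk_ste_init() now
dereferences chunk->buddy_mem, so the chunk->buddy_mem assignment moves up
before the call; and since initialization is now just pointer arithmetic that
cannot fail, the out_free_chunk error path is deleted. A minimal sketch of the
ordering dependency, with simplified stand-in types as before:

/* Sketch of the ordering constraint; simplified stand-ins, not kernel code. */
struct buddy { unsigned char *hw_ste_arr; };
struct chunk { struct buddy *buddy_mem; unsigned char *hw_ste_arr; };

void chunk_ste_init(struct chunk *c, int offset)
{
	/* Reads c->buddy_mem, so the caller must assign it first. */
	c->hw_ste_arr = c->buddy_mem->hw_ste_arr + (offset / 64) * 48;
}

struct chunk *chunk_create(struct buddy *b, int offset, struct chunk *c)
{
	c->buddy_mem = b;          /* must precede chunk_ste_init() */
	chunk_ste_init(c, offset); /* cannot fail; no unwind path needed */
	return c;
}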
@@ -160,6 +160,11 @@ struct mlx5dr_icm_buddy_mem {
 	 * sync_ste command sets them free.
 	 */
 	struct list_head hot_list;
+
+	/* Memory optimisation */
+	struct mlx5dr_ste *ste_arr;
+	struct list_head *miss_list;
+	u8 *hw_ste_arr;
 };
 
 int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,