Commit 76087d99 authored by Michael Chan, committed by Jakub Kicinski

bnxt_en: Restructure context memory data structures

The current code uses a flat bnxt_ctx_mem_info structure to store the
8 types of context memory for the NIC.  All the context memory types
are very similar and have similar parameters, so they can all share a
common structure to improve the organization.  In addition, a new
firmware interface will provide an API to retrieve each type of
context memory individually, by calling the API repeatedly.

This patch reorganizes the bnxt_ctx_mem_info structure to fit better
with the new firmware interface, while still working with the legacy
firmware interface.  The flat fields in bnxt_ctx_mem_info are replaced
by the bnxt_ctx_mem_type array, and the separate bnxt_mem_init array
is no longer needed.
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Link: https://lore.kernel.org/r/20231120234405.194542-4-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent e50dc4c2
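
As a quick illustration of the reorganization before the diff itself — a minimal compilable sketch, where the sketch_* names are simplified stand-ins for the driver structures, not actual bnxt_en code:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for struct bnxt_ctx_mem_type / bnxt_ctx_mem_info. */
    enum { SKETCH_CTX_QP, SKETCH_CTX_SRQ, SKETCH_CTX_MAX };

    struct sketch_ctx_mem_type {
            uint16_t entry_size;
            uint32_t max_entries;
    };

    struct sketch_ctx_mem_info {
            struct sketch_ctx_mem_type ctx_arr[SKETCH_CTX_MAX];
    };

    int main(void)
    {
            struct sketch_ctx_mem_info ctx = {
                    .ctx_arr[SKETCH_CTX_QP]  = { .entry_size = 256, .max_entries = 1024 },
                    .ctx_arr[SKETCH_CTX_SRQ] = { .entry_size = 128, .max_entries = 512 },
            };
            int i;

            /* With the array layout, one loop covers every type; the old flat
             * layout (qp_entry_size, srq_entry_size, ...) needed per-type code.
             */
            for (i = 0; i < SKETCH_CTX_MAX; i++)
                    printf("type %d: %u entries of %u bytes\n", i,
                           ctx.ctx_arr[i].max_entries, ctx.ctx_arr[i].entry_size);
            return 0;
    }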
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3121,20 +3121,20 @@ static void bnxt_free_skbs(struct bnxt *bp)
 	bnxt_free_rx_skbs(bp);
 }
 
-static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
+static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
 {
-	u8 init_val = mem_init->init_val;
-	u16 offset = mem_init->offset;
+	u8 init_val = ctxm->init_value;
+	u16 offset = ctxm->init_offset;
 	u8 *p2 = p;
 	int i;
 
 	if (!init_val)
 		return;
-	if (offset == BNXT_MEM_INVALID_OFFSET) {
+	if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
 		memset(p, init_val, len);
 		return;
 	}
-	for (i = 0; i < len; i += mem_init->size)
+	for (i = 0; i < len; i += ctxm->entry_size)
 		*(p2 + i + offset) = init_val;
 }
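
A standalone illustration of the init pattern above (the numbers are hypothetical, not from the patch): with entry_size 64, init_offset 8 and init_value 0xff, the loop stamps one marker byte per 64-byte entry instead of memsetting the whole page.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ENTRY_SIZE  64          /* hypothetical ctxm->entry_size */
    #define INIT_OFFSET  8          /* hypothetical ctxm->init_offset */
    #define INIT_VALUE  0xff        /* hypothetical ctxm->init_value */

    int main(void)
    {
            uint8_t page[4 * ENTRY_SIZE];
            int i;

            memset(page, 0, sizeof(page));
            /* Same loop shape as bnxt_init_ctx_mem(): one byte per entry. */
            for (i = 0; i < (int)sizeof(page); i += ENTRY_SIZE)
                    page[i + INIT_OFFSET] = INIT_VALUE;

            for (i = 0; i < 4; i++)
                    printf("entry %d: byte %d = 0x%02x\n",
                           i, INIT_OFFSET, page[i * ENTRY_SIZE + INIT_OFFSET]);
            return 0;
    }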
@@ -3201,8 +3201,8 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 		if (!rmem->pg_arr[i])
 			return -ENOMEM;
 
-		if (rmem->mem_init)
-			bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
+		if (rmem->ctx_mem)
+			bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i],
 					  rmem->page_size);
 		if (rmem->nr_pages > 1 || rmem->depth > 0) {
 			if (i == rmem->nr_pages - 2 &&
@@ -7175,37 +7175,16 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 	return rc;
 }
 
-static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
-				      struct hwrm_func_backing_store_qcaps_output *resp)
+static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
+				      u8 init_val, u8 init_offset,
+				      bool init_mask_set)
 {
-	struct bnxt_mem_init *mem_init;
-	u16 init_mask;
-	u8 init_val;
-	u8 *offset;
-	int i;
-
-	init_val = resp->ctx_kind_initializer;
-	init_mask = le16_to_cpu(resp->ctx_init_mask);
-	offset = &resp->qp_init_offset;
-	mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
-	for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
-		mem_init->init_val = init_val;
-		mem_init->offset = BNXT_MEM_INVALID_OFFSET;
-		if (!init_mask)
-			continue;
-		if (i == BNXT_CTX_MEM_INIT_STAT)
-			offset = &resp->stat_init_offset;
-		if (init_mask & (1 << i))
-			mem_init->offset = *offset * 4;
-		else
-			mem_init->init_val = 0;
-	}
-	ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
-	ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
-	ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
-	ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
-	ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
-	ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
+	ctxm->init_value = init_val;
+	ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
+	if (init_mask_set)
+		ctxm->init_offset = init_offset * 4;
+	else
+		ctxm->init_value = 0;
 }
 
 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
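
Two details of the helper worth noting: the firmware reports init offsets in 4-byte units (hence the `* 4`), and the bit position in ctx_init_mask follows the order in which the types are processed — QP, SRQ, CQ, VNIC, STAT, MRAV, matching the old BNXT_CTX_MEM_INIT_* indices. A hedged, standalone decoding sketch with made-up response values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical qcaps response values. */
            uint16_t ctx_init_mask = 0x0001;    /* bit 0 = QP offset valid */
            uint8_t qp_init_offset = 2;         /* reported in 4-byte units */

            bool qp_valid = (ctx_init_mask & (1 << 0)) != 0;
            uint16_t qp_byte_offset = qp_init_offset * 4;   /* byte 8 */

            printf("QP init offset %s, byte offset %u\n",
                   qp_valid ? "valid" : "invalid", qp_byte_offset);
            return 0;
    }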
@@ -7225,8 +7204,11 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 	rc = hwrm_req_send_silent(bp, req);
 	if (!rc) {
 		struct bnxt_ctx_pg_info *ctx_pg;
+		struct bnxt_ctx_mem_type *ctxm;
 		struct bnxt_ctx_mem_info *ctx;
+		u8 init_val, init_idx = 0;
 		int i, tqm_rings;
+		u16 init_mask;
 
 		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 		if (!ctx) {
@@ -7235,39 +7217,69 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 		}
 		bp->ctx = ctx;
-		ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
-		ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
-		ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
-		ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
-		ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
-		ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
-		ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
-		ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
-		ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
-		ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
-		ctx->vnic_max_vnic_entries =
-			le16_to_cpu(resp->vnic_max_vnic_entries);
-		ctx->vnic_max_ring_table_entries =
-			le16_to_cpu(resp->vnic_max_ring_table_entries);
-		ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
-		ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
-		ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
-		ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
-		ctx->tqm_min_entries_per_ring =
-			le32_to_cpu(resp->tqm_min_entries_per_ring);
-		ctx->tqm_max_entries_per_ring =
-			le32_to_cpu(resp->tqm_max_entries_per_ring);
-		ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
-		if (!ctx->tqm_entries_multiple)
-			ctx->tqm_entries_multiple = 1;
-		ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
-		ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
-		ctx->mrav_num_entries_units =
-			le16_to_cpu(resp->mrav_num_entries_units);
-		ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
-		ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
 
-		bnxt_init_ctx_initializer(ctx, resp);
+		init_val = resp->ctx_kind_initializer;
+		init_mask = le16_to_cpu(resp->ctx_init_mask);
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+		ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
+		ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
+		ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
+		ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
+		bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
+					  (init_mask & (1 << init_idx++)) != 0);
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+		ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
+		ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
+		ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
+		bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
+					  (init_mask & (1 << init_idx++)) != 0);
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+		ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
+		ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
+		ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
+		bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
+					  (init_mask & (1 << init_idx++)) != 0);
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+		ctxm->vnic_entries = le16_to_cpu(resp->vnic_max_vnic_entries);
+		ctxm->max_entries = ctxm->vnic_entries +
+			le16_to_cpu(resp->vnic_max_ring_table_entries);
+		ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
+		bnxt_init_ctx_initializer(ctxm, init_val,
+					  resp->vnic_init_offset,
+					  (init_mask & (1 << init_idx++)) != 0);
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+		ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
+		ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
+		bnxt_init_ctx_initializer(ctxm, init_val,
+					  resp->stat_init_offset,
+					  (init_mask & (1 << init_idx++)) != 0);
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
+		ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
+		ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
+		ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
+		ctxm->entry_multiple = resp->tqm_entries_multiple;
+		if (!ctxm->entry_multiple)
+			ctxm->entry_multiple = 1;
+
+		memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
+		ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
+		ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
+		ctxm->mrav_num_entries_units =
+			le16_to_cpu(resp->mrav_num_entries_units);
+		bnxt_init_ctx_initializer(ctxm, init_val,
+					  resp->mrav_init_offset,
+					  (init_mask & (1 << init_idx++)) != 0);
+
+		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+		ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
+		ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
 
 		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
 		if (!ctx->tqm_fp_rings_count)
@@ -7275,6 +7287,9 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 		else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
 			ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
 
+		ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
+		ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
+
 		tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
 		ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
 		if (!ctx_pg) {
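
The instance_bmap set here encodes which ring instances share one bnxt_ctx_mem_type: `(1 << n) - 1` builds a mask of the n lowest bits, one bit per fast-path TQM ring. A tiny sketch with a hypothetical ring count:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t tqm_fp_rings_count = 3;     /* hypothetical */
            uint32_t instance_bmap = (1u << tqm_fp_rings_count) - 1;

            printf("instance_bmap = 0x%x\n", instance_bmap);    /* 0x7: rings 0-2 */
            return 0;
    }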
@@ -7321,6 +7336,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
 	struct hwrm_func_backing_store_cfg_input *req;
 	struct bnxt_ctx_mem_info *ctx = bp->ctx;
 	struct bnxt_ctx_pg_info *ctx_pg;
+	struct bnxt_ctx_mem_type *ctxm;
 	void **__req = (void **)&req;
 	u32 req_len = sizeof(*req);
 	__le32 *num_entries;
@@ -7343,70 +7359,86 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
 	req->enables = cpu_to_le32(enables);
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
 		ctx_pg = &ctx->qp_mem;
+		ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
 		req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
-		req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
-		req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
-		req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
+		req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
+		req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
+		req->qp_entry_size = cpu_to_le16(ctxm->entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->qpc_pg_size_qpc_lvl,
 				      &req->qpc_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
 		ctx_pg = &ctx->srq_mem;
+		ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
 		req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
-		req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
-		req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
+		req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
+		req->srq_entry_size = cpu_to_le16(ctxm->entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->srq_pg_size_srq_lvl,
 				      &req->srq_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
 		ctx_pg = &ctx->cq_mem;
+		ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
 		req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
-		req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
-		req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
+		req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
+		req->cq_entry_size = cpu_to_le16(ctxm->entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->cq_pg_size_cq_lvl,
 				      &req->cq_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
 		ctx_pg = &ctx->vnic_mem;
-		req->vnic_num_vnic_entries =
-			cpu_to_le16(ctx->vnic_max_vnic_entries);
+		ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+		req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
 		req->vnic_num_ring_table_entries =
-			cpu_to_le16(ctx->vnic_max_ring_table_entries);
-		req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
+			cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
+		req->vnic_entry_size = cpu_to_le16(ctxm->entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->vnic_pg_size_vnic_lvl,
 				      &req->vnic_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
 		ctx_pg = &ctx->stat_mem;
-		req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
-		req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
+		ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+		req->stat_num_entries = cpu_to_le32(ctxm->max_entries);
+		req->stat_entry_size = cpu_to_le16(ctxm->entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->stat_pg_size_stat_lvl,
 				      &req->stat_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
+		u32 units;
+
 		ctx_pg = &ctx->mrav_mem;
+		ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
 		req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
-		if (ctx->mrav_num_entries_units)
-			flags |=
-				FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
-		req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
+		units = ctxm->mrav_num_entries_units;
+		if (units) {
+			u32 num_mr, num_ah = ctxm->mrav_av_entries;
+			u32 entries;
+
+			num_mr = ctx_pg->entries - num_ah;
+			entries = ((num_mr / units) << 16) | (num_ah / units);
+			req->mrav_num_entries = cpu_to_le32(entries);
+			flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
+		}
+		req->mrav_entry_size = cpu_to_le16(ctxm->entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->mrav_pg_size_mrav_lvl,
 				      &req->mrav_page_dir);
 	}
 	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
 		ctx_pg = &ctx->tim_mem;
+		ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
 		req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
-		req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
+		req->tim_entry_size = cpu_to_le16(ctxm->entry_size);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
 				      &req->tim_pg_size_tim_lvl,
 				      &req->tim_page_dir);
 	}
+	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
 	for (i = 0, num_entries = &req->tqm_sp_num_entries,
 	     pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
 	     pg_dir = &req->tqm_sp_page_dir,
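
The MRAV branch above packs two counts into one 32-bit field when the firmware works in "entry units": the MR count goes in the high 16 bits and the AH count in the low 16 bits, both divided by the unit size. A standalone sketch with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical values: 256K MR + 128K AH entries, units of 128. */
            uint32_t num_ah = 128 * 1024;
            uint32_t num_mr = 256 * 1024;
            uint32_t units = 128;
            uint32_t entries = ((num_mr / units) << 16) | (num_ah / units);

            /* Prints 0x08000400: 2048 MR units, 1024 AH units. */
            printf("mrav_num_entries = 0x%08x (mr units=%u, ah units=%u)\n",
                   entries, entries >> 16, entries & 0xffff);
            return 0;
    }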
@@ -7416,7 +7448,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
 		if (!(enables & ena))
 			continue;
 
-		req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
+		req->tqm_entry_size = cpu_to_le16(ctxm->entry_size);
 		ctx_pg = ctx->tqm_mem[i];
 		*num_entries = cpu_to_le32(ctx_pg->entries);
 		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
@@ -7441,7 +7473,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
 
 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
 				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
-				  u8 depth, struct bnxt_mem_init *mem_init)
+				  u8 depth, struct bnxt_ctx_mem_type *ctxm)
 {
 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
 	int rc;
@@ -7479,7 +7511,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
 			rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
 			rmem->depth = 1;
 			rmem->nr_pages = MAX_CTX_PAGES;
-			rmem->mem_init = mem_init;
+			rmem->ctx_mem = ctxm;
 			if (i == (nr_tbls - 1)) {
 				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -7494,7 +7526,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
 		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
 		if (rmem->nr_pages > 1 || depth)
 			rmem->depth = 1;
-		rmem->mem_init = mem_init;
+		rmem->ctx_mem = ctxm;
 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
 	}
 	return rc;
@@ -7559,10 +7591,12 @@ void bnxt_free_ctx_mem(struct bnxt *bp)
 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 {
 	struct bnxt_ctx_pg_info *ctx_pg;
+	struct bnxt_ctx_mem_type *ctxm;
 	struct bnxt_ctx_mem_info *ctx;
-	struct bnxt_mem_init *init;
+	u32 l2_qps, qp1_qps, max_qps;
 	u32 mem_size, ena, entries;
 	u32 entries_sp, min;
+	u32 srqs, max_srqs;
 	u32 num_mr, num_ah;
 	u32 extra_srqs = 0;
 	u32 extra_qps = 0;
@@ -7579,60 +7613,65 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
 		return 0;
 
+	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+	l2_qps = ctxm->qp_l2_entries;
+	qp1_qps = ctxm->qp_qp1_entries;
+	max_qps = ctxm->max_entries;
+	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+	srqs = ctxm->srq_l2_entries;
+	max_srqs = ctxm->max_entries;
 	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
 		pg_lvl = 2;
-		extra_qps = 65536;
-		extra_srqs = 8192;
+		extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
+		extra_srqs = min_t(u32, 8192, max_srqs - srqs);
 	}
 
+	ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
 	ctx_pg = &ctx->qp_mem;
-	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
-			  extra_qps;
-	if (ctx->qp_entry_size) {
-		mem_size = ctx->qp_entry_size * ctx_pg->entries;
-		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+	ctx_pg->entries = l2_qps + qp1_qps + extra_qps;
+	if (ctxm->entry_size) {
+		mem_size = ctxm->entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, ctxm);
 		if (rc)
 			return rc;
 	}
 
+	ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
 	ctx_pg = &ctx->srq_mem;
-	ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
-	if (ctx->srq_entry_size) {
-		mem_size = ctx->srq_entry_size * ctx_pg->entries;
-		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+	ctx_pg->entries = srqs + extra_srqs;
+	if (ctxm->entry_size) {
+		mem_size = ctxm->entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, ctxm);
 		if (rc)
 			return rc;
 	}
 
+	ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
 	ctx_pg = &ctx->cq_mem;
-	ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
-	if (ctx->cq_entry_size) {
-		mem_size = ctx->cq_entry_size * ctx_pg->entries;
-		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+	ctx_pg->entries = ctxm->cq_l2_entries + extra_qps * 2;
+	if (ctxm->entry_size) {
+		mem_size = ctxm->entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, ctxm);
 		if (rc)
 			return rc;
 	}
 
+	ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
 	ctx_pg = &ctx->vnic_mem;
-	ctx_pg->entries = ctx->vnic_max_vnic_entries +
-			  ctx->vnic_max_ring_table_entries;
-	if (ctx->vnic_entry_size) {
-		mem_size = ctx->vnic_entry_size * ctx_pg->entries;
-		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
+	ctx_pg->entries = ctxm->max_entries;
+	if (ctxm->entry_size) {
+		mem_size = ctxm->entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, ctxm);
 		if (rc)
 			return rc;
 	}
 
+	ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
 	ctx_pg = &ctx->stat_mem;
-	ctx_pg->entries = ctx->stat_max_entries;
-	if (ctx->stat_entry_size) {
-		mem_size = ctx->stat_entry_size * ctx_pg->entries;
-		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
+	ctx_pg->entries = ctxm->max_entries;
+	if (ctxm->entry_size) {
+		mem_size = ctxm->entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, ctxm);
 		if (rc)
 			return rc;
 	}
@@ -7641,30 +7680,31 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
 		goto skip_rdma;
 
+	ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
 	ctx_pg = &ctx->mrav_mem;
 	/* 128K extra is needed to accommodate static AH context
 	 * allocation by f/w.
 	 */
-	num_mr = 1024 * 256;
-	num_ah = 1024 * 128;
+	num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+	num_ah = min_t(u32, num_mr, 1024 * 128);
+	ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+	if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+		ctxm->mrav_av_entries = num_ah;
+
 	ctx_pg->entries = num_mr + num_ah;
-	if (ctx->mrav_entry_size) {
-		mem_size = ctx->mrav_entry_size * ctx_pg->entries;
-		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
+	if (ctxm->entry_size) {
+		mem_size = ctxm->entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, ctxm);
 		if (rc)
 			return rc;
 	}
 	ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
-	if (ctx->mrav_num_entries_units)
-		ctx_pg->entries =
-			((num_mr / ctx->mrav_num_entries_units) << 16) |
-			(num_ah / ctx->mrav_num_entries_units);
 
+	ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
 	ctx_pg = &ctx->tim_mem;
-	ctx_pg->entries = ctx->qp_mem.entries;
-	if (ctx->tim_entry_size) {
-		mem_size = ctx->tim_entry_size * ctx_pg->entries;
+	ctx_pg->entries = l2_qps + qp1_qps + extra_qps;
+	if (ctxm->entry_size) {
+		mem_size = ctxm->entry_size * ctx_pg->entries;
 		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
 		if (rc)
 			return rc;
@@ -7672,18 +7712,19 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
 
 skip_rdma:
-	min = ctx->tqm_min_entries_per_ring;
-	entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
-		     2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
-	entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
-	entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
-	entries = roundup(entries, ctx->tqm_entries_multiple);
-	entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
+	ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
+	min = ctxm->min_entries;
+	entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
+		     2 * (extra_qps + qp1_qps) + min;
+	entries_sp = roundup(entries_sp, ctxm->entry_multiple);
+	entries = l2_qps + 2 * (extra_qps + qp1_qps);
+	entries = roundup(entries, ctxm->entry_multiple);
+	entries = clamp_t(u32, entries, min, ctxm->max_entries);
 	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
 		ctx_pg = ctx->tqm_mem[i];
 		ctx_pg->entries = i ? entries : entries_sp;
-		if (ctx->tqm_entry_size) {
-			mem_size = ctx->tqm_entry_size * ctx_pg->entries;
+		if (ctxm->entry_size) {
+			mem_size = ctxm->entry_size * ctx_pg->entries;
 			rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
 						    NULL);
 			if (rc)
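
The TQM ring sizing above is a roundup-then-clamp pattern. A standalone sketch with hypothetical numbers (ROUNDUP/CLAMP are userspace stand-ins for the kernel's roundup() and clamp_t()):

    #include <stdint.h>
    #include <stdio.h>

    #define ROUNDUP(x, y)    ((((x) + (y) - 1) / (y)) * (y))
    #define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

    int main(void)
    {
            /* Hypothetical: 1024 L2 QPs, 128 QP1 QPs, no extra QPs,
             * entry multiple 64, ring limits [256, 65536].
             */
            uint32_t l2_qps = 1024, qp1_qps = 128, extra_qps = 0;
            uint32_t entries = l2_qps + 2 * (extra_qps + qp1_qps);

            entries = ROUNDUP(entries, 64);
            entries = CLAMP(entries, 256, 65536);
            printf("TQM ring entries = %u\n", entries);      /* 1280 */
            return 0;
    }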
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -762,13 +762,6 @@ struct bnxt_sw_rx_agg_bd {
 	dma_addr_t mapping;
 };
 
-struct bnxt_mem_init {
-	u8	init_val;
-	u16	offset;
-#define	BNXT_MEM_INVALID_OFFSET	0xffff
-	u16	size;
-};
-
 struct bnxt_ring_mem_info {
 	int			nr_pages;
 	int			page_size;
@@ -778,7 +771,7 @@ struct bnxt_ring_mem_info {
 #define BNXT_RMEM_USE_FULL_PAGE_FLAG	4
 	u16			depth;
 
-	struct bnxt_mem_init	*mem_init;
+	struct bnxt_ctx_mem_type	*ctx_mem;
 	void			**pg_arr;
 	dma_addr_t		*dma_arr;
@@ -1551,35 +1544,70 @@ do {									\
 	attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K;		\
 } while (0)
 
+struct bnxt_ctx_mem_type {
+	u16	type;
+	u16	entry_size;
+	u32	flags;
+	u32	instance_bmap;
+	u8	init_value;
+	u8	entry_multiple;
+	u16	init_offset;
+#define	BNXT_CTX_INIT_INVALID_OFFSET	0xffff
+	u32	max_entries;
+	u32	min_entries;
+	u8	last:1;
+	u8	split_entry_cnt;
+#define BNXT_MAX_SPLIT_ENTRY	4
+	union {
+		struct {
+			u32	qp_l2_entries;
+			u32	qp_qp1_entries;
+			u32	qp_fast_qpmd_entries;
+		};
+		u32	srq_l2_entries;
+		u32	cq_l2_entries;
+		u32	vnic_entries;
+		struct {
+			u32	mrav_av_entries;
+			u32	mrav_num_entries_units;
+		};
+		u32	split[BNXT_MAX_SPLIT_ENTRY];
+	};
+};
+
+#define BNXT_CTX_MRAV_AV_SPLIT_ENTRY	0
+
+#define BNXT_CTX_QP	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP
+#define BNXT_CTX_SRQ	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ
+#define BNXT_CTX_CQ	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ
+#define BNXT_CTX_VNIC	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC
+#define BNXT_CTX_STAT	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT
+#define BNXT_CTX_STQM	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING
+#define BNXT_CTX_FTQM	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING
+#define BNXT_CTX_MRAV	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV
+#define BNXT_CTX_TIM	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM
+#define BNXT_CTX_TKC	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC
+#define BNXT_CTX_RKC	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC
+#define BNXT_CTX_MTQM	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING
+#define BNXT_CTX_SQDBS	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW
+#define BNXT_CTX_RQDBS	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW
+#define BNXT_CTX_SRQDBS	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW
+#define BNXT_CTX_CQDBS	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW
+#define BNXT_CTX_QTKC	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC
+#define BNXT_CTX_QRKC	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC
+#define BNXT_CTX_TBLSC	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE
+#define BNXT_CTX_XPAR	FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION
+
+#define BNXT_CTX_MAX	(BNXT_CTX_TIM + 1)
+#define BNXT_CTX_V2_MAX	(BNXT_CTX_XPAR + 1)
+#define BNXT_CTX_INV	((u16)-1)
+
 struct bnxt_ctx_mem_info {
-	u32	qp_max_entries;
-	u16	qp_min_qp1_entries;
-	u16	qp_max_l2_entries;
-	u16	qp_entry_size;
-	u16	srq_max_l2_entries;
-	u32	srq_max_entries;
-	u16	srq_entry_size;
-	u16	cq_max_l2_entries;
-	u32	cq_max_entries;
-	u16	cq_entry_size;
-	u16	vnic_max_vnic_entries;
-	u16	vnic_max_ring_table_entries;
-	u16	vnic_entry_size;
-	u32	stat_max_entries;
-	u16	stat_entry_size;
-	u16	tqm_entry_size;
-	u32	tqm_min_entries_per_ring;
-	u32	tqm_max_entries_per_ring;
-	u32	mrav_max_entries;
-	u16	mrav_entry_size;
-	u16	tim_entry_size;
-	u32	tim_max_entries;
-	u16	mrav_num_entries_units;
-	u8	tqm_entries_multiple;
 	u8	tqm_fp_rings_count;
 
 	u32	flags;
 #define BNXT_CTX_FLAG_INITED	0x01
+	struct bnxt_ctx_mem_type	ctx_arr[BNXT_CTX_MAX];
 
 	struct bnxt_ctx_pg_info qp_mem;
 	struct bnxt_ctx_pg_info srq_mem;
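
One design note on the union in bnxt_ctx_mem_type above: type-specific code uses the named members, while generic code can walk the same storage as split[]; for MRAV, split[BNXT_CTX_MRAV_AV_SPLIT_ENTRY] aliases mrav_av_entries. A reduced, compilable sketch (C11 anonymous structs, most fields trimmed):

    #include <stdint.h>
    #include <stdio.h>

    #define BNXT_MAX_SPLIT_ENTRY            4
    #define BNXT_CTX_MRAV_AV_SPLIT_ENTRY    0

    /* Reduced copy of the union inside bnxt_ctx_mem_type. */
    union ctx_split_view {
            struct {
                    uint32_t mrav_av_entries;
                    uint32_t mrav_num_entries_units;
            };
            uint32_t split[BNXT_MAX_SPLIT_ENTRY];
    };

    int main(void)
    {
            union ctx_split_view v = { .mrav_av_entries = 1024 };

            /* The named member and split[0] are the same storage. */
            printf("split[%d] = %u\n", BNXT_CTX_MRAV_AV_SPLIT_ENTRY,
                   v.split[BNXT_CTX_MRAV_AV_SPLIT_ENTRY]);
            return 0;
    }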
@@ -1589,15 +1617,6 @@ struct bnxt_ctx_mem_info {
 	struct bnxt_ctx_pg_info mrav_mem;
 	struct bnxt_ctx_pg_info tim_mem;
 	struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
-
-#define BNXT_CTX_MEM_INIT_QP	0
-#define BNXT_CTX_MEM_INIT_SRQ	1
-#define BNXT_CTX_MEM_INIT_CQ	2
-#define BNXT_CTX_MEM_INIT_VNIC	3
-#define BNXT_CTX_MEM_INIT_STAT	4
-#define BNXT_CTX_MEM_INIT_MRAV	5
-#define BNXT_CTX_MEM_INIT_MAX	6
-	struct bnxt_mem_init	mem_init[BNXT_CTX_MEM_INIT_MAX];
 };
 
 enum bnxt_health_severity {