Commit 57856dde authored by Geetha sowjanya, committed by David S. Miller

octeontx2-af: Support for disabling NPA Aura/Pool contexts

This patch adds support for an RVU PF/VF to disable all Aura/Pool
contexts of an NPA LF via mbox. This will be used by PF/VF drivers
upon teardown or while freeing up HW resources.

A HW context which is not INIT'ed cannot be modified, and an
RVU PF/VF driver may or may not INIT all the Aura/Pool contexts.
So a bitmap is introduced to keep track of enabled NPA Aura/Pool
contexts, so that only the enabled HW contexts are disabled upon LF
teardown.
Signed-off-by: Geetha sowjanya <gakula@marvell.com>
Signed-off-by: Stanislaw Kardach <skardach@marvell.com>
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4a3581cd
@@ -142,6 +142,7 @@ M(CGX_INTLBK_DISABLE, 0x20B, msg_req, msg_rsp) \
 M(NPA_LF_ALLOC, 0x400, npa_lf_alloc_req, npa_lf_alloc_rsp) \
 M(NPA_LF_FREE, 0x401, msg_req, msg_rsp) \
 M(NPA_AQ_ENQ, 0x402, npa_aq_enq_req, npa_aq_enq_rsp) \
+M(NPA_HWCTX_DISABLE, 0x403, hwctx_disable_req, msg_rsp) \
 /* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
 /* TIM mbox IDs (range 0x800 - 0x9FF) */ \
 /* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
@@ -325,4 +326,10 @@ struct npa_aq_enq_rsp {
 	};
 };

+/* Disable all contexts of type 'ctype' */
+struct hwctx_disable_req {
+	struct mbox_msghdr hdr;
+	u8 ctype;
+};
+
 #endif /* MBOX_H */
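For context, the teardown flow this message enables on the PF/VF side is simply "send one NPA_HWCTX_DISABLE per context type". A minimal sketch under stated assumptions: send_to_af() and the mbox handle are hypothetical stand-ins for the driver's real RVU mailbox transport, which is not part of this patch.

/* Sketch only: disable all Aura, then all Pool, contexts of an NPA LF.
 * send_to_af() is a hypothetical helper standing in for the driver's
 * mbox allocate/send/wait-for-response path; hdr.pcifunc and the rest
 * of the message header are assumed to be filled by that path.
 */
static int npa_hwctx_disable_all(void *mbox)
{
	struct hwctx_disable_req req;
	int err;

	memset(&req, 0, sizeof(req));
	req.ctype = NPA_AQ_CTYPE_AURA;	/* disable enabled Auras first */
	err = send_to_af(mbox, MBOX_MSG_NPA_HWCTX_DISABLE, &req.hdr);
	if (err)
		return err;

	memset(&req, 0, sizeof(req));
	req.ctype = NPA_AQ_CTYPE_POOL;	/* then the Pools */
	return send_to_af(mbox, MBOX_MSG_NPA_HWCTX_DISABLE, &req.hdr);
}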
@@ -77,6 +77,8 @@ struct rvu_pfvf {
 	struct qmem *aura_ctx;
 	struct qmem *pool_ctx;
 	struct qmem *npa_qints_ctx;
+	unsigned long *aura_bmap;
+	unsigned long *pool_bmap;
 };

 struct rvu_hwinfo {
@@ -216,6 +218,9 @@ void rvu_npa_freemem(struct rvu *rvu);
 int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
 				struct npa_aq_enq_req *req,
 				struct npa_aq_enq_rsp *rsp);
+int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+				       struct hwctx_disable_req *req,
+				       struct msg_rsp *rsp);
 int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
 				  struct npa_lf_alloc_req *req,
 				  struct npa_lf_alloc_rsp *rsp);
...
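The two bitmaps added above record which Aura/Pool contexts a PF/VF has INIT'ed with ena set, so LF teardown only has to touch those. As an aside, a sketch of how such a per-LF bitmap can be walked with the generic kernel bitmap helpers; disable_one_aura() and nr_auras are illustrative names, not from the patch, which itself iterates with test_bit() in npa_lf_hwctx_disable().

#include <linux/bitops.h>

/* Illustration only: a hypothetical per-context disable step, which in
 * the real code is an AQ WRITE with ena = 0 for this aura id.
 */
static void disable_one_aura(int id)
{
	/* placeholder */
}

/* Visit just the contexts whose bits are set, i.e. those that were
 * INIT'ed with ena = 1.
 */
static void disable_enabled_auras(unsigned long *aura_bmap, int nr_auras)
{
	int id;

	for_each_set_bit(id, aura_bmap, nr_auras)
		disable_one_aura(id);
}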
@@ -63,6 +63,7 @@ static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
 	struct admin_queue *aq;
 	struct rvu_pfvf *pfvf;
 	void *ctx, *mask;
+	bool ena;

 	pfvf = rvu_get_pfvf(rvu, pcifunc);
 	if (!pfvf->aura_ctx || req->aura_id >= pfvf->aura_ctx->qsize)
@@ -149,6 +150,35 @@ static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
 		return rc;
 	}

+	/* Set aura bitmap if aura hw context is enabled */
+	if (req->ctype == NPA_AQ_CTYPE_AURA) {
+		if (req->op == NPA_AQ_INSTOP_INIT && req->aura.ena)
+			__set_bit(req->aura_id, pfvf->aura_bmap);
+		if (req->op == NPA_AQ_INSTOP_WRITE) {
+			ena = (req->aura.ena & req->aura_mask.ena) |
+				(test_bit(req->aura_id, pfvf->aura_bmap) &
+				~req->aura_mask.ena);
+			if (ena)
+				__set_bit(req->aura_id, pfvf->aura_bmap);
+			else
+				__clear_bit(req->aura_id, pfvf->aura_bmap);
+		}
+	}
+
+	/* Set pool bitmap if pool hw context is enabled */
+	if (req->ctype == NPA_AQ_CTYPE_POOL) {
+		if (req->op == NPA_AQ_INSTOP_INIT && req->pool.ena)
+			__set_bit(req->aura_id, pfvf->pool_bmap);
+		if (req->op == NPA_AQ_INSTOP_WRITE) {
+			ena = (req->pool.ena & req->pool_mask.ena) |
+				(test_bit(req->aura_id, pfvf->pool_bmap) &
+				~req->pool_mask.ena);
+			if (ena)
+				__set_bit(req->aura_id, pfvf->pool_bmap);
+			else
+				__clear_bit(req->aura_id, pfvf->pool_bmap);
+		}
+	}
+
 	spin_unlock(&aq->lock);

 	if (rsp) {
@@ -166,6 +196,51 @@ static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
 	return 0;
 }
+static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+	struct npa_aq_enq_req aq_req;
+	unsigned long *bmap;
+	int id, cnt = 0;
+	int err = 0, rc;
+
+	if (!pfvf->pool_ctx || !pfvf->aura_ctx)
+		return NPA_AF_ERR_AQ_ENQUEUE;
+
+	memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
+	aq_req.hdr.pcifunc = req->hdr.pcifunc;
+
+	if (req->ctype == NPA_AQ_CTYPE_POOL) {
+		aq_req.pool.ena = 0;
+		aq_req.pool_mask.ena = 1;
+		cnt = pfvf->pool_ctx->qsize;
+		bmap = pfvf->pool_bmap;
+	} else if (req->ctype == NPA_AQ_CTYPE_AURA) {
+		aq_req.aura.ena = 0;
+		aq_req.aura_mask.ena = 1;
+		cnt = pfvf->aura_ctx->qsize;
+		bmap = pfvf->aura_bmap;
+	}
+
+	aq_req.ctype = req->ctype;
+	aq_req.op = NPA_AQ_INSTOP_WRITE;
+
+	for (id = 0; id < cnt; id++) {
+		if (!test_bit(id, bmap))
+			continue;
+		aq_req.aura_id = id;
+		rc = rvu_npa_aq_enq_inst(rvu, &aq_req, NULL);
+		if (rc) {
+			err = rc;
+			dev_err(rvu->dev, "Failed to disable %s:%d context\n",
+				(req->ctype == NPA_AQ_CTYPE_AURA) ?
				"Aura" : "Pool", id);
+		}
+	}
+
+	return err;
+}
 int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
 				struct npa_aq_enq_req *req,
 				struct npa_aq_enq_rsp *rsp)
@@ -173,11 +248,24 @@ int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
 	return rvu_npa_aq_enq_inst(rvu, req, rsp);
 }

+int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+				       struct hwctx_disable_req *req,
+				       struct msg_rsp *rsp)
+{
+	return npa_lf_hwctx_disable(rvu, req);
+}
+
 static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
 {
+	kfree(pfvf->aura_bmap);
+	pfvf->aura_bmap = NULL;
+
 	qmem_free(rvu->dev, pfvf->aura_ctx);
 	pfvf->aura_ctx = NULL;

+	kfree(pfvf->pool_bmap);
+	pfvf->pool_bmap = NULL;
+
 	qmem_free(rvu->dev, pfvf->pool_ctx);
 	pfvf->pool_ctx = NULL;
@@ -227,12 +315,22 @@ int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
 	if (err)
 		goto free_mem;

+	pfvf->aura_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+				  GFP_KERNEL);
+	if (!pfvf->aura_bmap)
+		goto free_mem;
+
 	/* Alloc memory for pool HW contexts */
 	hwctx_size = 1UL << ((ctx_cfg >> 4) & 0xF);
 	err = qmem_alloc(rvu->dev, &pfvf->pool_ctx, req->nr_pools, hwctx_size);
 	if (err)
 		goto free_mem;

+	pfvf->pool_bmap = kcalloc(NPA_AURA_COUNT(req->aura_sz), sizeof(long),
+				  GFP_KERNEL);
+	if (!pfvf->pool_bmap)
+		goto free_mem;
+
 	/* Get no of queue interrupts supported */
 	cfg = rvu_read64(rvu, blkaddr, NPA_AF_CONST);
 	qints = (cfg >> 28) & 0xFFF;
...
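The bitmap update for AQ WRITE ops folds the context's current tracked state into the new one through the write mask: a bit the mask does not cover keeps its old value. Below is a minimal, standalone C sketch of that merge, assuming single-bit ena/mask values as in the NPA aura/pool contexts; the function and variable names are illustrative, not from the patch.

#include <stdbool.h>
#include <stdio.h>

/* Merge a masked 1-bit write with the currently tracked state:
 * the bit selected by 'mask' takes the new value, otherwise the old
 * value 'cur' is kept. This mirrors the
 * "ena = (new & mask) | (cur & ~mask)" expression used in
 * rvu_npa_aq_enq_inst() to keep aura_bmap/pool_bmap in sync.
 */
static bool merge_ena(bool new_ena, bool mask, bool cur)
{
	return (new_ena & mask) | (cur & ~mask);
}

int main(void)
{
	/* A WRITE with mask = 1 overrides the tracked state ... */
	printf("%d\n", merge_ena(0, 1, 1));	/* 0: context now disabled */
	/* ... while mask = 0 leaves it untouched. */
	printf("%d\n", merge_ena(0, 0, 1));	/* 1: still enabled */
	return 0;
}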