Commit 019aba04 authored by Naveen Mamindlapalli, committed by Paolo Abeni

octeontx2-af: Modify SMQ flush sequence to drop packets

The current implementation of SMQ flush sequence waits for the packets
in the TM pipeline to be transmitted out of the link. This sequence
doesn't succeed in HW when there is any issue with link such as lack of
link credits, link down or any other traffic that is fully occupying the
link bandwidth (QoS). This patch modifies the SMQ flush sequence to
drop the packets after TL1 level (SQM) instead of polling for the packets
to be sent out of RPM/CGX link.

Fixes: 5d9b976d ("octeontx2-af: Support fixed transmit scheduler topology")
Signed-off-by: Naveen Mamindlapalli <naveenm@marvell.com>
Reviewed-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
Link: https://patch.msgid.link/20240906045838.1620308-1-naveenm@marvell.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent 4c800227
...@@ -319,6 +319,7 @@ struct nix_mark_format { ...@@ -319,6 +319,7 @@ struct nix_mark_format {
/* smq(flush) to tl1 cir/pir info */ /* smq(flush) to tl1 cir/pir info */
struct nix_smq_tree_ctx { struct nix_smq_tree_ctx {
u16 schq;
u64 cir_off; u64 cir_off;
u64 cir_val; u64 cir_val;
u64 pir_off; u64 pir_off;
...@@ -328,8 +329,6 @@ struct nix_smq_tree_ctx { ...@@ -328,8 +329,6 @@ struct nix_smq_tree_ctx {
/* smq flush context */ /* smq flush context */
struct nix_smq_flush_ctx { struct nix_smq_flush_ctx {
int smq; int smq;
u16 tl1_schq;
u16 tl2_schq;
struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT]; struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
}; };
......
...@@ -2259,14 +2259,13 @@ static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq, ...@@ -2259,14 +2259,13 @@ static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
schq = smq; schq = smq;
for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
smq_tree_ctx->schq = schq;
if (lvl == NIX_TXSCH_LVL_TL1) { if (lvl == NIX_TXSCH_LVL_TL1) {
smq_flush_ctx->tl1_schq = schq;
smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq); smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
smq_tree_ctx->pir_off = 0; smq_tree_ctx->pir_off = 0;
smq_tree_ctx->pir_val = 0; smq_tree_ctx->pir_val = 0;
parent_off = 0; parent_off = 0;
} else if (lvl == NIX_TXSCH_LVL_TL2) { } else if (lvl == NIX_TXSCH_LVL_TL2) {
smq_flush_ctx->tl2_schq = schq;
smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq); smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq); smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
parent_off = NIX_AF_TL2X_PARENT(schq); parent_off = NIX_AF_TL2X_PARENT(schq);
...@@ -2301,8 +2300,8 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr, ...@@ -2301,8 +2300,8 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
{ {
struct nix_txsch *txsch; struct nix_txsch *txsch;
struct nix_hw *nix_hw; struct nix_hw *nix_hw;
int tl2, tl2_schq;
u64 regoff; u64 regoff;
int tl2;
nix_hw = get_nix_hw(rvu->hw, blkaddr); nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw) if (!nix_hw)
...@@ -2310,16 +2309,17 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr, ...@@ -2310,16 +2309,17 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
/* loop through all TL2s with matching PF_FUNC */ /* loop through all TL2s with matching PF_FUNC */
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
for (tl2 = 0; tl2 < txsch->schq.max; tl2++) { for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
/* skip the smq(flush) TL2 */ /* skip the smq(flush) TL2 */
if (tl2 == smq_flush_ctx->tl2_schq) if (tl2 == tl2_schq)
continue; continue;
/* skip unused TL2s */ /* skip unused TL2s */
if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE) if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
continue; continue;
/* skip if PF_FUNC doesn't match */ /* skip if PF_FUNC doesn't match */
if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) != if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
(TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] & (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] &
~RVU_PFVF_FUNC_MASK))) ~RVU_PFVF_FUNC_MASK)))
continue; continue;
/* enable/disable XOFF */ /* enable/disable XOFF */
...@@ -2361,10 +2361,12 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, ...@@ -2361,10 +2361,12 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
int smq, u16 pcifunc, int nixlf) int smq, u16 pcifunc, int nixlf)
{ {
struct nix_smq_flush_ctx *smq_flush_ctx; struct nix_smq_flush_ctx *smq_flush_ctx;
int err, restore_tx_en = 0, i;
int pf = rvu_get_pf(pcifunc); int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0; u8 cgx_id = 0, lmac_id = 0;
int err, restore_tx_en = 0; u16 tl2_tl3_link_schq;
u64 cfg; u8 link, link_level;
u64 cfg, bmap = 0;
if (!is_rvu_otx2(rvu)) { if (!is_rvu_otx2(rvu)) {
/* Skip SMQ flush if pkt count is zero */ /* Skip SMQ flush if pkt count is zero */
...@@ -2388,16 +2390,38 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, ...@@ -2388,16 +2390,38 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true); nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false); nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
/* Do SMQ flush and set enqueue xoff */
cfg |= BIT_ULL(50) | BIT_ULL(49);
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
/* Disable backpressure from physical link, /* Disable backpressure from physical link,
* otherwise SMQ flush may stall. * otherwise SMQ flush may stall.
*/ */
rvu_cgx_enadis_rx_bp(rvu, pf, false); rvu_cgx_enadis_rx_bp(rvu, pf, false);
link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
/* SMQ set enqueue xoff */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
cfg |= BIT_ULL(50);
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
/* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
cfg = rvu_read64(rvu, blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
if (!(cfg & BIT_ULL(12)))
continue;
bmap |= (1 << i);
cfg &= ~BIT_ULL(12);
rvu_write64(rvu, blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
}
/* Do SMQ flush and set enqueue xoff */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
cfg |= BIT_ULL(50) | BIT_ULL(49);
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
/* Wait for flush to complete */ /* Wait for flush to complete */
err = rvu_poll_reg(rvu, blkaddr, err = rvu_poll_reg(rvu, blkaddr,
NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
...@@ -2406,6 +2430,17 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, ...@@ -2406,6 +2430,17 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
"NIXLF%d: SMQ%d flush failed, txlink might be busy\n", "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
nixlf, smq); nixlf, smq);
/* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
if (!(bmap & (1 << i)))
continue;
cfg = rvu_read64(rvu, blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
cfg |= BIT_ULL(12);
rvu_write64(rvu, blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
}
/* clear XOFF on TL2s */ /* clear XOFF on TL2s */
nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true); nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false); nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment