Commit d6212d2e authored by Geetha sowjanya's avatar Geetha sowjanya Committed by David S. Miller

octeontx2-af: Create BPIDs free pool

In the current driver, 64 BPIDs are reserved for LBK interfaces.
These BPIDs are 1-to-1 mapped to LBK interface channel numbers.
In some use cases one LBK interface requires more than one BPID,
while in others it may not require any at all.
These use cases can't be addressed with the current implementation,
as it always reserves exactly one BPID per LBK channel.
This patch addresses the issue by creating a free BPID pool from these
64 BPIDs instead of 1-to-1 mapping them to the LBK channels.
Now, based on the use case, an LBK interface can request a BPID via bp_enable().

This patch also reduces the number of BPIDs for CGX interfaces to 8
and adds proper error codes.
Signed-off-by: default avatarGeetha sowjanya <gakula@marvell.com>
Reviewed-by: default avatarSimon Horman <horms@kernel.org>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 094bdd48
...@@ -837,6 +837,8 @@ enum nix_af_status { ...@@ -837,6 +837,8 @@ enum nix_af_status {
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429, NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430, NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
NIX_AF_ERR_LINK_CREDITS = -431, NIX_AF_ERR_LINK_CREDITS = -431,
NIX_AF_ERR_INVALID_BPID = -434,
NIX_AF_ERR_INVALID_BPID_REQ = -435,
NIX_AF_ERR_INVALID_MCAST_GRP = -436, NIX_AF_ERR_INVALID_MCAST_GRP = -436,
NIX_AF_ERR_INVALID_MCAST_DEL_REQ = -437, NIX_AF_ERR_INVALID_MCAST_DEL_REQ = -437,
NIX_AF_ERR_NON_CONTIG_MCE_LIST = -438, NIX_AF_ERR_NON_CONTIG_MCE_LIST = -438,
......
...@@ -2618,6 +2618,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) ...@@ -2618,6 +2618,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* 3. Cleanup pools (NPA) * 3. Cleanup pools (NPA)
*/ */
/* Free allocated BPIDs */
rvu_nix_flr_free_bpids(rvu, pcifunc);
/* Free multicast/mirror node associated with the 'pcifunc' */ /* Free multicast/mirror node associated with the 'pcifunc' */
rvu_nix_mcast_flr_free_entries(rvu, pcifunc); rvu_nix_mcast_flr_free_entries(rvu, pcifunc);
......
...@@ -288,6 +288,16 @@ enum rvu_pfvf_flags { ...@@ -288,6 +288,16 @@ enum rvu_pfvf_flags {
#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC) #define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)
/* Backpressure ID (BPID) bookkeeping for one NIX block.
 * CGX and SDP links keep statically carved BPID ranges; the remaining
 * BPIDs form a shared free pool that LBK interfaces allocate from on
 * demand (see nix_setup_bpids()/rvu_nix_get_bpid()).
 */
struct nix_bp {
struct rsrc_bmap bpids; /* free bpids bitmap */
u16 cgx_bpid_cnt; /* BPIDs statically reserved for CGX links */
u16 sdp_bpid_cnt; /* BPIDs statically reserved for SDP links */
u16 free_pool_base; /* first BPID of the shared free pool */
u16 *fn_map; /* pcifunc mapping */
u8 *intf_map; /* interface type map */
u8 *ref_cnt; /* per-BPID reference count (shared users) */
};
struct nix_txsch { struct nix_txsch {
struct rsrc_bmap schq; struct rsrc_bmap schq;
u8 lvl; u8 lvl;
...@@ -363,6 +373,7 @@ struct nix_hw { ...@@ -363,6 +373,7 @@ struct nix_hw {
struct nix_lso lso; struct nix_lso lso;
struct nix_txvlan txvlan; struct nix_txvlan txvlan;
struct nix_ipolicer *ipolicer; struct nix_ipolicer *ipolicer;
struct nix_bp bp;
u64 *tx_credits; u64 *tx_credits;
u8 cc_mcs_cnt; u8 cc_mcs_cnt;
}; };
...@@ -873,6 +884,7 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, ...@@ -873,6 +884,7 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx); u32 mcast_grp_idx);
int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc, int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx, u16 mcam_index); u32 mcast_grp_idx, u16 mcam_index);
void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc);
/* NPC APIs */ /* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu); void rvu_npc_freemem(struct rvu *rvu);
......
...@@ -499,14 +499,84 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) ...@@ -499,14 +499,84 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
rvu_cgx_disable_dmac_entries(rvu, pcifunc); rvu_cgx_disable_dmac_entries(rvu, pcifunc);
} }
#define NIX_BPIDS_PER_LMAC	8
#define NIX_BPIDS_PER_CPT	1

/* nix_setup_bpids() - carve the HW BPID space for one NIX block.
 *
 * Reads the total BPID count from NIX_AF_CONST1, statically reserves
 * ranges for CGX, SDP and CPT, and turns the remainder into a shared
 * free pool (bitmap + per-BPID owner/interface/refcount arrays) that
 * LBK interfaces allocate from on demand.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr)
{
	struct nix_bp *bp = &hw->bp;
	int err, max_bpids;
	u64 cfg;

	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
	max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg);

	/* Reserve the BPIds for CGX and SDP */
	bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC;
	bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg);
	bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt +
			     NIX_BPIDS_PER_CPT;
	bp->bpids.max = max_bpids - bp->free_pool_base;

	err = rvu_alloc_bitmap(&bp->bpids);
	if (err)
		return err;

	bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max,
				  sizeof(u16), GFP_KERNEL);
	if (!bp->fn_map)
		goto free_bitmap;

	bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max,
				    sizeof(u8), GFP_KERNEL);
	if (!bp->intf_map)
		goto free_bitmap;

	bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max,
				   sizeof(u8), GFP_KERNEL);
	if (!bp->ref_cnt)
		goto free_bitmap;

	return 0;

free_bitmap:
	/* fn_map/intf_map/ref_cnt are devm-managed, but the bitmap from
	 * rvu_alloc_bitmap() is a plain kcalloc() and would otherwise
	 * leak on this error path.
	 */
	rvu_free_bitmap(&bp->bpids);
	return -ENOMEM;
}
/* rvu_nix_flr_free_bpids() - FLR hook: release every pool BPID that is
 * still owned by @pcifunc.
 *
 * Only LBK (AF VF) interfaces allocate BPIDs from the shared free pool,
 * so all other functions are ignored. A BPID is returned to the pool
 * only once its reference count drops to zero.
 */
void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
{
	struct nix_hw *nix_hw;
	struct nix_bp *bp;
	int blkaddr, id;

	/* Pool BPIDs are handed out to loopback (AF VF) devices only */
	if (!is_afvf(pcifunc))
		return;

	if (nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr))
		return;

	bp = &nix_hw->bp;

	mutex_lock(&rvu->rsrc_lock);
	for (id = 0; id < bp->bpids.max; id++) {
		if (bp->fn_map[id] != pcifunc)
			continue;
		/* Drop one reference; keep the BPID while users remain */
		if (--bp->ref_cnt[id])
			continue;
		rvu_free_rsrc(&bp->bpids, id);
		bp->fn_map[id] = 0;
	}
	mutex_unlock(&rvu->rsrc_lock);
}
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
struct nix_bp_cfg_req *req, struct nix_bp_cfg_req *req,
struct msg_rsp *rsp) struct msg_rsp *rsp)
{ {
u16 pcifunc = req->hdr.pcifunc; u16 pcifunc = req->hdr.pcifunc;
int blkaddr, pf, type, err;
u16 chan_base, chan, bpid;
struct rvu_pfvf *pfvf; struct rvu_pfvf *pfvf;
int blkaddr, pf, type; struct nix_hw *nix_hw;
u16 chan_base, chan; struct nix_bp *bp;
u64 cfg; u64 cfg;
pf = rvu_get_pf(pcifunc); pf = rvu_get_pf(pcifunc);
...@@ -515,13 +585,29 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, ...@@ -515,13 +585,29 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
return 0; return 0;
pfvf = rvu_get_pfvf(rvu, pcifunc); pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
if (err)
return err;
bp = &nix_hw->bp;
chan_base = pfvf->rx_chan_base + req->chan_base; chan_base = pfvf->rx_chan_base + req->chan_base;
for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) {
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
cfg & ~BIT_ULL(16)); cfg & ~BIT_ULL(16));
if (type == NIX_INTF_TYPE_LBK) {
bpid = cfg & GENMASK(8, 0);
mutex_lock(&rvu->rsrc_lock);
rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base);
for (bpid = 0; bpid < bp->bpids.max; bpid++) {
if (bp->fn_map[bpid] == pcifunc) {
bp->fn_map[bpid] = 0;
bp->ref_cnt[bpid] = 0;
}
}
mutex_unlock(&rvu->rsrc_lock);
}
} }
return 0; return 0;
} }
...@@ -529,25 +615,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, ...@@ -529,25 +615,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id) int type, int chan_id)
{ {
int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt; int bpid, blkaddr, sdp_chan_base, err;
u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
struct rvu_hwinfo *hw = rvu->hw; struct rvu_hwinfo *hw = rvu->hw;
struct rvu_pfvf *pfvf; struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
u8 cgx_id, lmac_id; u8 cgx_id, lmac_id;
u64 cfg; struct nix_bp *bp;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
lmac_chan_cnt = cfg & 0xFF;
cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr);
sdp_chan_cnt = cfg & 0xFFF; if (err)
sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt; return err;
pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); bp = &nix_hw->bp;
/* Backpressure IDs range division /* Backpressure IDs range division
* CGX channles are mapped to (0 - 191) BPIDs * CGX channles are mapped to (0 - 191) BPIDs
...@@ -561,38 +642,48 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, ...@@ -561,38 +642,48 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
*/ */
switch (type) { switch (type) {
case NIX_INTF_TYPE_CGX: case NIX_INTF_TYPE_CGX:
if ((req->chan_base + req->chan_cnt) > 16) if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC)
return -EINVAL; return NIX_AF_ERR_INVALID_BPID_REQ;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
/* Assign bpid based on cgx, lmac and chan id */ /* Assign bpid based on cgx, lmac and chan id */
bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) + bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) +
(lmac_id * lmac_chan_cnt) + req->chan_base; (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base;
if (req->bpid_per_chan) if (req->bpid_per_chan)
bpid += chan_id; bpid += chan_id;
if (bpid > cgx_bpid_cnt) if (bpid > bp->cgx_bpid_cnt)
return -EINVAL; return NIX_AF_ERR_INVALID_BPID;
break; break;
case NIX_INTF_TYPE_LBK: case NIX_INTF_TYPE_LBK:
if ((req->chan_base + req->chan_cnt) > 63) /* Alloc bpid from the free pool */
return -EINVAL; mutex_lock(&rvu->rsrc_lock);
bpid = cgx_bpid_cnt + req->chan_base; bpid = rvu_alloc_rsrc(&bp->bpids);
if (req->bpid_per_chan) if (bpid < 0) {
bpid += chan_id; mutex_unlock(&rvu->rsrc_lock);
if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) return NIX_AF_ERR_INVALID_BPID;
return -EINVAL; }
bp->fn_map[bpid] = req->hdr.pcifunc;
bp->ref_cnt[bpid]++;
bpid += bp->free_pool_base;
mutex_unlock(&rvu->rsrc_lock);
break; break;
case NIX_INTF_TYPE_SDP: case NIX_INTF_TYPE_SDP:
if ((req->chan_base + req->chan_cnt) > 255) if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt)
return -EINVAL; return NIX_AF_ERR_INVALID_BPID_REQ;
/* Handle usecase of 2 SDP blocks */
if (!hw->cap.programmable_chans)
sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START;
else
sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base;
bpid = sdp_bpid_cnt + req->chan_base; bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base;
if (req->bpid_per_chan) if (req->bpid_per_chan)
bpid += chan_id; bpid += chan_id;
if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt)) if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt))
return -EINVAL; return NIX_AF_ERR_INVALID_BPID;
break; break;
default: default:
return -EINVAL; return -EINVAL;
...@@ -4791,6 +4882,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) ...@@ -4791,6 +4882,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
if (err) if (err)
return err; return err;
err = nix_setup_bpids(rvu, nix_hw, blkaddr);
if (err)
return err;
/* Configure segmentation offload formats */ /* Configure segmentation offload formats */
nix_setup_lso(rvu, nix_hw, blkaddr); nix_setup_lso(rvu, nix_hw, blkaddr);
......
...@@ -439,6 +439,9 @@ ...@@ -439,6 +439,9 @@
#define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16) #define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
#define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32) #define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32)
#define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12)
#define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0)
/* SSO */ /* SSO */
#define SSO_AF_CONST (0x1000) #define SSO_AF_CONST (0x1000)
#define SSO_AF_CONST1 (0x1008) #define SSO_AF_CONST1 (0x1008)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment