Commit 756051e2 authored by Sunil Goutham, committed by David S. Miller

octeontx2-af: Configure block LF's MSIX vector offset

Firmware configures a certain number of MSIX vectors for each enabled
RVU PF/VF. When a block LF is attached to a PF/VF, the number of MSIX
vectors needed by that LF is set aside (out of the PF/VF's total MSIX
vectors) and the LF's msix_offset is configured in HW.

Also added support for an RVU PF/VF to retrieve its block LFs' MSIX
vector offset information from the AF via mbox.
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 746ea742
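
For context on how this new mbox message is meant to be consumed: a PF/VF driver sends an MSIX_OFFSET request and reads the per-block vector offsets out of struct msix_offset_rsp (added below); offsets for blocks with no LF attached come back as MSIX_VECTOR_INVALID (0xFFFF). A minimal consumer-side sketch, assuming placeholder mbox helpers (alloc_mbox_msg(), send_mbox_and_wait() and get_mbox_rsp() are hypothetical names, not the driver's actual mbox API):

/*
 * Hypothetical consumer-side sketch (not part of this patch): query the
 * AF for this PF/VF's block LF MSIX offsets. Helper names are placeholders.
 */
static int query_msix_offsets(struct otx2_mbox *mbox, int devid)
{
        struct msix_offset_rsp *rsp;
        struct msg_req *req;
        int slot;

        /* Allocate an MSIX_OFFSET request in the mbox region */
        req = alloc_mbox_msg(mbox, devid, MBOX_MSG_MSIX_OFFSET,
                             sizeof(*req), sizeof(*rsp));
        if (!req)
                return -ENOMEM;

        /* Send to AF and wait for the response */
        send_mbox_and_wait(mbox, devid);
        rsp = get_mbox_rsp(mbox, devid, &req->hdr);
        if (IS_ERR(rsp))
                return PTR_ERR(rsp);

        /* NPA and NIX have one LF per PF/VF, hence a single offset each */
        if (rsp->npa_msixoff != MSIX_VECTOR_INVALID)
                pr_info("NPA LF MSIX offset: %d\n", rsp->npa_msixoff);
        if (rsp->nix_msixoff != MSIX_VECTOR_INVALID)
                pr_info("NIX LF MSIX offset: %d\n", rsp->nix_msixoff);

        /* Multi-LF blocks report one offset per attached LF slot */
        for (slot = 0; slot < rsp->cptlfs; slot++)
                pr_info("CPT LF%d MSIX offset: %d\n",
                        slot, rsp->cptlf_msixoff[slot]);
        return 0;
}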
...@@ -122,6 +122,7 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
M(READY, 0x001, msg_req, ready_msg_rsp) \
M(ATTACH_RESOURCES, 0x002, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, rsrc_detach, msg_rsp) \
M(MSIX_OFFSET, 0x004, msg_req, msix_offset_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
...@@ -190,4 +191,21 @@ struct rsrc_detach {
        u8 cptlfs:1;
};

#define MSIX_VECTOR_INVALID 0xFFFF
#define MAX_RVU_BLKLF_CNT 256

struct msix_offset_rsp {
        struct mbox_msghdr hdr;
        u16 npa_msixoff;
        u16 nix_msixoff;
        u8 sso;
        u8 ssow;
        u8 timlfs;
        u8 cptlfs;
        u16 sso_msixoff[MAX_RVU_BLKLF_CNT];
        u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
        u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
        u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
};
#endif /* MBOX_H */
...@@ -24,6 +24,11 @@
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                  struct rvu_block *block, int lf);

/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF) },
...@@ -74,6 +79,45 @@ int rvu_alloc_rsrc(struct rsrc_bmap *rsrc)
        return id;
}

static int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
        int start;

        if (!rsrc->bmap)
                return -EINVAL;

        start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
        if (start >= rsrc->max)
                return -ENOSPC;

        bitmap_set(rsrc->bmap, start, nrsrc);
        return start;
}

static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{
        if (!rsrc->bmap)
                return;
        if (start >= rsrc->max)
                return;

        bitmap_clear(rsrc->bmap, start, nrsrc);
}

static bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc)
{
        int start;

        if (!rsrc->bmap)
                return false;

        start = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, 0, nrsrc, 0);
        if (start >= rsrc->max)
                return false;

        return true;
}
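
These contiguous-range helpers back the per-PF/VF MSIX vector bitmap introduced by this patch: rvu_set_msix_offset() further down first calls rvu_rsrc_check_contig() and then rvu_alloc_rsrc_contig() so that an LF's vectors land in one contiguous range, and rvu_clear_msix_offset() hands that range back via rvu_free_rsrc_contig().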

void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id)
{
        if (!rsrc->bmap)
...@@ -102,6 +146,26 @@ int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
        return 0;
}

/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
        u16 match = 0;
        int lf;

        spin_lock(&rvu->rsrc_lock);
        for (lf = 0; lf < block->lf.max; lf++) {
                if (block->fn_map[lf] == pcifunc) {
                        if (slot == match) {
                                spin_unlock(&rvu->rsrc_lock);
                                return lf;
                        }
                        match++;
                }
        }
        spin_unlock(&rvu->rsrc_lock);
        return -ENODEV;
}

/* Convert BLOCK_TYPE_E to a BLOCK_ADDR_E.
 * Some silicon variants of OcteonTX2 supports
 * multiple blocks of same type.
...@@ -236,6 +300,16 @@ inline int rvu_get_pf(u16 pcifunc)
        return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
}

void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
{
        u64 cfg;

        /* Get numVFs attached to this PF and first HWVF */
        cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
        *numvfs = (cfg >> 12) & 0xFF;
        *hwvf = cfg & 0xFFF;
}

static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
        int pf, func;
...@@ -330,20 +404,150 @@ static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
                pfvf = rvu_get_pfvf(rvu, (cfg >> 8) & 0xFFFF);
                rvu_update_rsrc_map(rvu, pfvf, block,
                                    (cfg >> 8) & 0xFFFF, lf, true);

                /* Set start MSIX vector for this LF within this PF/VF */
                rvu_set_msix_offset(rvu, pfvf, block, lf);
        }
}

static void rvu_check_min_msix_vec(struct rvu *rvu, int nvecs, int pf, int vf)
{
        int min_vecs;

        if (!vf)
                goto check_pf;

        if (!nvecs) {
                dev_warn(rvu->dev,
                         "PF%d:VF%d is configured with zero msix vectors, %d\n",
                         pf, vf - 1, nvecs);
        }
        return;

check_pf:
        if (pf == 0)
                min_vecs = RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT;
        else
                min_vecs = RVU_PF_INT_VEC_CNT;

        if (!(nvecs < min_vecs))
                return;
        dev_warn(rvu->dev,
                 "PF%d is configured with too few vectors, %d, min is %d\n",
                 pf, nvecs, min_vecs);
}

static int rvu_setup_msix_resources(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        int pf, vf, numvfs, hwvf, err;
        struct rvu_pfvf *pfvf;
        int nvecs, offset;
        u64 cfg;

        for (pf = 0; pf < hw->total_pfs; pf++) {
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
                /* If PF is not enabled, nothing to do */
                if (!((cfg >> 20) & 0x01))
                        continue;

                rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);

                pfvf = &rvu->pf[pf];
                /* Get num of MSIX vectors attached to this PF */
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_MSIX_CFG(pf));
                pfvf->msix.max = ((cfg >> 32) & 0xFFF) + 1;
                rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, 0);

                /* Alloc msix bitmap for this PF */
                err = rvu_alloc_bitmap(&pfvf->msix);
                if (err)
                        return err;

                /* Allocate memory for MSIX vector to RVU block LF mapping */
                pfvf->msix_lfmap = devm_kcalloc(rvu->dev, pfvf->msix.max,
                                                sizeof(u16), GFP_KERNEL);
                if (!pfvf->msix_lfmap)
                        return -ENOMEM;

                /* For PF0 (AF) firmware will set msix vector offsets for
                 * AF, block AF and PF0_INT vectors, so jump to VFs.
                 */
                if (!pf)
                        goto setup_vfmsix;

                /* Set MSIX offset for PF's 'RVU_PF_INT_VEC' vectors.
                 * These are allocated on driver init and never freed,
                 * so no need to set 'msix_lfmap' for these.
                 */
                cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(pf));
                nvecs = (cfg >> 12) & 0xFF;
                cfg &= ~0x7FFULL;
                offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
                rvu_write64(rvu, BLKADDR_RVUM,
                            RVU_PRIV_PFX_INT_CFG(pf), cfg | offset);
setup_vfmsix:
                /* Alloc msix bitmap for VFs */
                for (vf = 0; vf < numvfs; vf++) {
                        pfvf = &rvu->hwvf[hwvf + vf];
                        /* Get num of MSIX vectors attached to this VF */
                        cfg = rvu_read64(rvu, BLKADDR_RVUM,
                                         RVU_PRIV_PFX_MSIX_CFG(pf));
                        pfvf->msix.max = (cfg & 0xFFF) + 1;
                        rvu_check_min_msix_vec(rvu, pfvf->msix.max, pf, vf + 1);

                        /* Alloc msix bitmap for this VF */
                        err = rvu_alloc_bitmap(&pfvf->msix);
                        if (err)
                                return err;

                        pfvf->msix_lfmap =
                                devm_kcalloc(rvu->dev, pfvf->msix.max,
                                             sizeof(u16), GFP_KERNEL);
                        if (!pfvf->msix_lfmap)
                                return -ENOMEM;

                        /* Set MSIX offset for HWVF's 'RVU_VF_INT_VEC' vectors.
                         * These are allocated on driver init and never freed,
                         * so no need to set 'msix_lfmap' for these.
                         */
                        cfg = rvu_read64(rvu, BLKADDR_RVUM,
                                         RVU_PRIV_HWVFX_INT_CFG(hwvf + vf));
                        nvecs = (cfg >> 12) & 0xFF;
                        cfg &= ~0x7FFULL;
                        offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);
                        rvu_write64(rvu, BLKADDR_RVUM,
                                    RVU_PRIV_HWVFX_INT_CFG(hwvf + vf),
                                    cfg | offset);
                }
        }
        return 0;
}

static void rvu_free_hw_resources(struct rvu *rvu)
{
        struct rvu_hwinfo *hw = rvu->hw;
        struct rvu_block *block;
        struct rvu_pfvf *pfvf;
        int id;

        /* Free block LF bitmaps */
        for (id = 0; id < BLK_COUNT; id++) {
                block = &hw->block[id];
                kfree(block->lf.bmap);
        }

        /* Free MSIX bitmaps */
        for (id = 0; id < hw->total_pfs; id++) {
                pfvf = &rvu->pf[id];
                kfree(pfvf->msix.bmap);
        }

        for (id = 0; id < hw->total_vfs; id++) {
                pfvf = &rvu->hwvf[id];
                kfree(pfvf->msix.bmap);
        }
}

static int rvu_setup_hw_resources(struct rvu *rvu)
...@@ -499,6 +703,12 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
        if (!rvu->hwvf)
                return -ENOMEM;

        spin_lock_init(&rvu->rsrc_lock);

        err = rvu_setup_msix_resources(rvu);
        if (err)
                return err;

        for (blkid = 0; blkid < BLK_COUNT; blkid++) {
                block = &hw->block[blkid];
                if (!block->lf.bmap)
...@@ -516,8 +726,6 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
                rvu_scan_block(rvu, block);
        }

        spin_lock_init(&rvu->rsrc_lock);

        return 0;
}
...@@ -603,6 +811,9 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
                /* Free the resource */
                rvu_free_rsrc(&block->lf, lf);

                /* Clear MSIX vector offset for this LF */
                rvu_clear_msix_offset(rvu, pfvf, block, lf);
        }
}
...@@ -696,6 +907,9 @@ static void rvu_attach_block(struct rvu *rvu, int pcifunc,
                            (lf << block->lfshift), cfg);
                rvu_update_rsrc_map(rvu, pfvf, block,
                                    pcifunc, lf, true);

                /* Set start MSIX vector for this LF within this PF/VF */
                rvu_set_msix_offset(rvu, pfvf, block, lf);
        }
}
...@@ -871,6 +1085,119 @@ static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
        return err;
}

static u16 rvu_get_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                               int blkaddr, int lf)
{
        u16 vec;

        if (lf < 0)
                return MSIX_VECTOR_INVALID;

        for (vec = 0; vec < pfvf->msix.max; vec++) {
                if (pfvf->msix_lfmap[vec] == MSIX_BLKLF(blkaddr, lf))
                        return vec;
        }
        return MSIX_VECTOR_INVALID;
}

static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                struct rvu_block *block, int lf)
{
        u16 nvecs, vec, offset;
        u64 cfg;

        cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
                         (lf << block->lfshift));
        nvecs = (cfg >> 12) & 0xFF;

        /* Check and alloc MSIX vectors, must be contiguous */
        if (!rvu_rsrc_check_contig(&pfvf->msix, nvecs))
                return;

        offset = rvu_alloc_rsrc_contig(&pfvf->msix, nvecs);

        /* Config MSIX offset in LF */
        rvu_write64(rvu, block->addr, block->msixcfg_reg |
                    (lf << block->lfshift), (cfg & ~0x7FFULL) | offset);

        /* Update the bitmap as well */
        for (vec = 0; vec < nvecs; vec++)
                pfvf->msix_lfmap[offset + vec] = MSIX_BLKLF(block->addr, lf);
}

static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
                                  struct rvu_block *block, int lf)
{
        u16 nvecs, vec, offset;
        u64 cfg;

        cfg = rvu_read64(rvu, block->addr, block->msixcfg_reg |
                         (lf << block->lfshift));
        nvecs = (cfg >> 12) & 0xFF;

        /* Clear MSIX offset in LF */
        rvu_write64(rvu, block->addr, block->msixcfg_reg |
                    (lf << block->lfshift), cfg & ~0x7FFULL);

        offset = rvu_get_msix_offset(rvu, pfvf, block->addr, lf);

        /* Update the mapping */
        for (vec = 0; vec < nvecs; vec++)
                pfvf->msix_lfmap[offset + vec] = 0;

        /* Free the same in MSIX bitmap */
        rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}

static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
                                        struct msix_offset_rsp *rsp)
{
        struct rvu_hwinfo *hw = rvu->hw;
        u16 pcifunc = req->hdr.pcifunc;
        struct rvu_pfvf *pfvf;
        int lf, slot;

        pfvf = rvu_get_pfvf(rvu, pcifunc);
        if (!pfvf->msix.bmap)
                return 0;

        /* Set MSIX offsets for each block's LFs attached to this PF/VF */
        lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
        rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);

        lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
        rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);

        rsp->sso = pfvf->sso;
        for (slot = 0; slot < rsp->sso; slot++) {
                lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSO], pcifunc, slot);
                rsp->sso_msixoff[slot] =
                        rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSO, lf);
        }

        rsp->ssow = pfvf->ssow;
        for (slot = 0; slot < rsp->ssow; slot++) {
                lf = rvu_get_lf(rvu, &hw->block[BLKADDR_SSOW], pcifunc, slot);
                rsp->ssow_msixoff[slot] =
                        rvu_get_msix_offset(rvu, pfvf, BLKADDR_SSOW, lf);
        }

        rsp->timlfs = pfvf->timlfs;
        for (slot = 0; slot < rsp->timlfs; slot++) {
                lf = rvu_get_lf(rvu, &hw->block[BLKADDR_TIM], pcifunc, slot);
                rsp->timlf_msixoff[slot] =
                        rvu_get_msix_offset(rvu, pfvf, BLKADDR_TIM, lf);
        }

        rsp->cptlfs = pfvf->cptlfs;
        for (slot = 0; slot < rsp->cptlfs; slot++) {
                lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT0], pcifunc, slot);
                rsp->cptlf_msixoff[slot] =
                        rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
        }
        return 0;
}

static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
                                struct mbox_msghdr *req)
{
...
...@@ -65,6 +65,11 @@ struct rvu_pfvf {
        u16 ssow;
        u16 cptlfs;
        u16 timlfs;

        /* Block LF's MSIX vector info */
        struct rsrc_bmap msix; /* Bitmap for MSIX vector alloc */
#define MSIX_BLKLF(blkaddr, lf) (((blkaddr) << 8) | ((lf) & 0xFF))
        u16 *msix_lfmap; /* Vector to block LF mapping */
};
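
Note: as defined above, MSIX_BLKLF() packs a block address and LF index into a u16, so each msix_lfmap[] entry records which block LF owns that MSIX vector (for example, MSIX_BLKLF(BLKADDR_NPA, 0) tags vectors belonging to NPA LF0).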

struct rvu_hwinfo {
...@@ -126,7 +131,9 @@ void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
...
...@@ -54,6 +54,7 @@ enum rvu_af_int_vec_e {
        RVU_AF_INT_VEC_PFME = 0x2,
        RVU_AF_INT_VEC_GEN = 0x3,
        RVU_AF_INT_VEC_MBOX = 0x4,
        RVU_AF_INT_VEC_CNT = 0x5,
};

/**
...@@ -67,6 +68,7 @@ enum rvu_pf_int_vec_e {
        RVU_PF_INT_VEC_VFPF_MBOX0 = 0x4,
        RVU_PF_INT_VEC_VFPF_MBOX1 = 0x5,
        RVU_PF_INT_VEC_AFPF_MBOX = 0x6,
        RVU_PF_INT_VEC_CNT = 0x7,
};

#endif /* RVU_STRUCT_H */