Commit 2cb59424 authored by David S. Miller

Merge branch 'octeonx2-mcam-management-rework'

Subbaraya Sundeep says:

====================
octeontx2: Rework MCAM flows management for VFs

From the Octeontx2 hardware point of view there is no
difference between PFs and VFs. Hence, with some refactoring
in the driver, the packet classification features and offloads
can be supported by VFs as well. This patchset unifies the
MCAM flows management so that VFs can also support
ntuple filters. Since MCAM entries are allocated by
all PFs and VFs in the system, the ability to modify the
MCAM rule count for a PF/VF at runtime is required.
This is achieved by using devlink.
Below is the summary of the patches:

Patches 1, 2 and 3 are trivial patches which help in
debugging in case of errors, by using custom error codes
and displaying proper error messages.

Patches 4 and 5 bring rx-all and ntuple support
for CGX mapped VFs and LBK VFs.

Patches 6, 7 and 8 bring devlink support to the
PF netdev driver so that the MCAM entry count can be
changed at runtime.
Changing the MCAM rule count at runtime, where multiple
rule allocations have been done, requires sorting.
Also, both ntuple and TC rules need to be unified.
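
For example, with this series the MCAM rule budget of a PF or
VF could be raised at runtime via the new devlink param (the
PCI address below is hypothetical):

  devlink dev param set pci/0002:02:00.0 name mcam_count \
          value 64 cmode runtime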

Patch 9 is related to AF NPC, where entries
allocated by a PF are placed at the bottom (low priority).

On CN10K there is a slight change in reading
NPC counters, which is handled by patch 10.

Patch 11 allows packets from CPT to be
parsed by NPC on CN10K.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 354e1f9d aee51224
......@@ -1078,6 +1078,13 @@ enum npc_af_status {
NPC_MCAM_ALLOC_DENIED = -702,
NPC_MCAM_ALLOC_FAILED = -703,
NPC_MCAM_PERM_DENIED = -704,
NPC_FLOW_INTF_INVALID = -707,
NPC_FLOW_CHAN_INVALID = -708,
NPC_FLOW_NO_NIXLF = -709,
NPC_FLOW_NOT_SUPPORTED = -710,
NPC_FLOW_VF_PERM_DENIED = -711,
NPC_FLOW_VF_NOT_INIT = -712,
NPC_FLOW_VF_OVERLAP = -713,
};
struct npc_mcam_alloc_entry_req {
......@@ -1426,4 +1433,13 @@ struct cpt_rxc_time_cfg_req {
u16 active_limit;
};
/* CGX mailbox error codes
* Range 1101 - 1200.
*/
enum cgx_af_status {
LMAC_AF_ERR_INVALID_PARAM = -1101,
LMAC_AF_ERR_PF_NOT_MAPPED = -1102,
LMAC_AF_ERR_PERM_DENIED = -1103,
};
#endif /* MBOX_H */
......@@ -924,16 +924,26 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
block->lfreset_reg = NPA_AF_LF_RST;
sprintf(block->name, "NPA");
err = rvu_alloc_bitmap(&block->lf);
if (err)
if (err) {
dev_err(rvu->dev,
"%s: Failed to allocate NPA LF bitmap\n", __func__);
return err;
}
nix:
err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
if (err)
if (err) {
dev_err(rvu->dev,
"%s: Failed to allocate NIX0 LFs bitmap\n", __func__);
return err;
}
err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
if (err)
if (err) {
dev_err(rvu->dev,
"%s: Failed to allocate NIX1 LFs bitmap\n", __func__);
return err;
}
/* Init SSO group's bitmap */
block = &hw->block[BLKADDR_SSO];
......@@ -953,8 +963,11 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
sprintf(block->name, "SSO GROUP");
err = rvu_alloc_bitmap(&block->lf);
if (err)
if (err) {
dev_err(rvu->dev,
"%s: Failed to allocate SSO LF bitmap\n", __func__);
return err;
}
ssow:
/* Init SSO workslot's bitmap */
......@@ -974,8 +987,11 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
block->lfreset_reg = SSOW_AF_LF_HWS_RST;
sprintf(block->name, "SSOWS");
err = rvu_alloc_bitmap(&block->lf);
if (err)
if (err) {
dev_err(rvu->dev,
"%s: Failed to allocate SSOW LF bitmap\n", __func__);
return err;
}
tim:
/* Init TIM LF's bitmap */
......@@ -996,35 +1012,55 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
block->lfreset_reg = TIM_AF_LF_RST;
sprintf(block->name, "TIM");
err = rvu_alloc_bitmap(&block->lf);
if (err)
if (err) {
dev_err(rvu->dev,
"%s: Failed to allocate TIM LF bitmap\n", __func__);
return err;
}
cpt:
err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
if (err)
if (err) {
dev_err(rvu->dev,
"%s: Failed to allocate CPT0 LF bitmap\n", __func__);
return err;
}
err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
if (err) {
dev_err(rvu->dev,
"%s: Failed to allocate CPT1 LF bitmap\n", __func__);
return err;
}
if (err)
return err;
/* Allocate memory for PFVF data */
rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
sizeof(struct rvu_pfvf), GFP_KERNEL);
if (!rvu->pf)
if (!rvu->pf) {
dev_err(rvu->dev,
"%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__);
return -ENOMEM;
}
rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs,
sizeof(struct rvu_pfvf), GFP_KERNEL);
if (!rvu->hwvf)
if (!rvu->hwvf) {
dev_err(rvu->dev,
"%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__);
return -ENOMEM;
}
mutex_init(&rvu->rsrc_lock);
rvu_fwdata_init(rvu);
err = rvu_setup_msix_resources(rvu);
if (err)
if (err) {
dev_err(rvu->dev,
"%s: Failed to setup MSIX resources\n", __func__);
return err;
}
for (blkid = 0; blkid < BLK_COUNT; blkid++) {
block = &hw->block[blkid];
......@@ -1050,25 +1086,33 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
goto msix_err;
err = rvu_npc_init(rvu);
if (err)
if (err) {
dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__);
goto npc_err;
}
err = rvu_cgx_init(rvu);
if (err)
if (err) {
dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__);
goto cgx_err;
}
/* Assign MACs for CGX mapped functions */
rvu_setup_pfvf_macaddress(rvu);
err = rvu_npa_init(rvu);
if (err)
if (err) {
dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__);
goto npa_err;
}
rvu_get_lbk_bufsize(rvu);
err = rvu_nix_init(rvu);
if (err)
if (err) {
dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__);
goto nix_err;
}
rvu_program_channels(rvu);
......@@ -2984,27 +3028,37 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
rvu->hw->total_pfs, rvu_afpf_mbox_handler,
rvu_afpf_mbox_up_handler);
if (err)
if (err) {
dev_err(dev, "%s: Failed to initialize mbox\n", __func__);
goto err_hwsetup;
}
err = rvu_flr_init(rvu);
if (err)
if (err) {
dev_err(dev, "%s: Failed to initialize flr\n", __func__);
goto err_mbox;
}
err = rvu_register_interrupts(rvu);
if (err)
if (err) {
dev_err(dev, "%s: Failed to register interrupts\n", __func__);
goto err_flr;
}
err = rvu_register_dl(rvu);
if (err)
if (err) {
dev_err(dev, "%s: Failed to register devlink\n", __func__);
goto err_irq;
}
rvu_setup_rvum_blk_revid(rvu);
/* Enable AF's VFs (if any) */
err = rvu_enable_sriov(rvu);
if (err)
if (err) {
dev_err(dev, "%s: Failed to enable sriov\n", __func__);
goto err_dl;
}
/* Initialize debugfs */
rvu_dbg_init(rvu);
......
......@@ -356,6 +356,7 @@ struct rvu_hwinfo {
u16 npc_counters; /* No of match stats counters */
u32 lbk_bufsize; /* FIFO size supported by LBK */
bool npc_ext_set; /* Extended register set */
u64 npc_stat_ena; /* Match stats enable bit */
struct hw_cap cap;
struct rvu_block block[BLK_COUNT]; /* Block info */
......
......@@ -448,7 +448,7 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, pcifunc))
return -EPERM;
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
......@@ -507,7 +507,7 @@ static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
void *cgxd;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -ENODEV;
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
......@@ -561,7 +561,7 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
void *cgxd;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
......@@ -888,7 +888,7 @@ int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
if (!is_pf_cgxmapped(rvu, pf))
return -EPERM;
return LMAC_AF_ERR_PF_NOT_MAPPED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
......@@ -1046,7 +1046,7 @@ int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_reset(cgx_id, lmac_id);
......@@ -1060,7 +1060,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
......
......@@ -984,7 +984,7 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -EINVAL;
return NIX_AF_ERR_INVALID_NIXBLK;
return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
}
......@@ -1405,7 +1405,7 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -EINVAL;
return NIX_AF_ERR_INVALID_NIXBLK;
cfg = (((u32)req->offset & 0x7) << 16) |
(((u32)req->y_mask & 0xF) << 12) |
......@@ -1673,7 +1673,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -EINVAL;
return NIX_AF_ERR_INVALID_NIXBLK;
mutex_lock(&rvu->rsrc_lock);
......@@ -1795,7 +1795,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -EINVAL;
return NIX_AF_ERR_INVALID_NIXBLK;
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
if (nixlf < 0)
......@@ -1866,7 +1866,7 @@ static int nix_txschq_free_one(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -EINVAL;
return NIX_AF_ERR_INVALID_NIXBLK;
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
if (nixlf < 0)
......@@ -2066,7 +2066,7 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -EINVAL;
return NIX_AF_ERR_INVALID_NIXBLK;
txsch = &nix_hw->txsch[req->lvl];
pfvf_map = txsch->pfvf_map;
......@@ -2164,8 +2164,12 @@ static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
u16 pcifunc, int index)
{
struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
struct nix_txvlan *vlan = &nix_hw->txvlan;
struct nix_txvlan *vlan;
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
vlan = &nix_hw->txvlan;
if (vlan->entry2pfvf_map[index] != pcifunc)
return NIX_AF_ERR_PARAM;
......@@ -2206,10 +2210,15 @@ static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
u64 vtag, u8 size)
{
struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
struct nix_txvlan *vlan = &nix_hw->txvlan;
struct nix_txvlan *vlan;
u64 regval;
int index;
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
vlan = &nix_hw->txvlan;
mutex_lock(&vlan->rsrc_lock);
index = rvu_alloc_rsrc(&vlan->rsrc);
......@@ -2234,12 +2243,16 @@ static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
struct nix_vtag_config *req)
{
struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
struct nix_txvlan *vlan = &nix_hw->txvlan;
u16 pcifunc = req->hdr.pcifunc;
int idx0 = req->tx.vtag0_idx;
int idx1 = req->tx.vtag1_idx;
struct nix_txvlan *vlan;
int err = 0;
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
vlan = &nix_hw->txvlan;
if (req->tx.free_vtag0 && req->tx.free_vtag1)
if (vlan->entry2pfvf_map[idx0] != pcifunc ||
vlan->entry2pfvf_map[idx1] != pcifunc)
......@@ -2266,9 +2279,13 @@ static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
struct nix_vtag_config_rsp *rsp)
{
struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
struct nix_txvlan *vlan = &nix_hw->txvlan;
struct nix_txvlan *vlan;
u16 pcifunc = req->hdr.pcifunc;
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
vlan = &nix_hw->txvlan;
if (req->tx.cfg_vtag0) {
rsp->vtag0_idx =
nix_tx_vtag_alloc(rvu, blkaddr,
......@@ -3142,7 +3159,7 @@ static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
hw = get_nix_hw(rvu->hw, blkaddr);
if (!hw)
return -EINVAL;
return NIX_AF_ERR_INVALID_NIXBLK;
/* No room to add a new flow hash algorithm */
if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
......@@ -3182,7 +3199,7 @@ int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -EINVAL;
return NIX_AF_ERR_INVALID_NIXBLK;
alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
/* Failed to get algo index from the existing list, reserve a new one */
......@@ -3459,7 +3476,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -EINVAL;
return NIX_AF_ERR_INVALID_NIXBLK;
if (is_afvf(pcifunc))
rvu_get_lbk_link_max_frs(rvu, &max_mtu);
......@@ -4126,7 +4143,7 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -EINVAL;
return NIX_AF_ERR_INVALID_NIXBLK;
/* Find existing matching LSO format, if any */
for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
......
......@@ -724,7 +724,17 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
action.index = pfvf->promisc_mce_idx;
}
/* For cn10k the upper two bits of the channel number are the
 * cpt channel number. By masking out these bits in the mcam
 * entry, the same entry used for NIX also allows packets
 * received from cpt to be parsed.
 */
if (!is_rvu_otx2(rvu))
req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
else
req.chan_mask = 0xFFFU;
if (chan_cnt > 1) {
if (!is_power_of_2(chan_cnt)) {
dev_err(rvu->dev,
......@@ -1898,9 +1908,22 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
mcam->banks = (npc_const >> 44) & 0xFULL;
mcam->banksize = (npc_const >> 28) & 0xFFFFULL;
hw->npc_stat_ena = BIT_ULL(9);
/* Extended set */
if (npc_const2) {
hw->npc_ext_set = true;
/* 96xx supports only match_stats, and npc_counters is
 * reflected in the NPC_AF_CONST reg.
 * STAT_SEL and ENA are at bit positions [0:8] and 9.
 * 98xx has both match_stat and the extended set, and
 * npc_counters is reflected in NPC_AF_CONST2.
 * STAT_SEL_EXT is added at bit positions [12:14].
 * cn10k supports only the extended set, hence npc_counters in
 * NPC_AF_CONST is 0 and npc_counters is reflected in NPC_AF_CONST2.
 * STAT_SEL is widened from [0:8] to [0:11] and the ENA bit moved to 63.
 */
if (!hw->npc_counters)
hw->npc_stat_ena = BIT_ULL(63);
hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL;
mcam->banksize = npc_const2 & 0xFFFFULL;
}
......@@ -1955,7 +1978,7 @@ static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
rvu_write64(rvu, blkaddr,
NPC_AF_INTFX_MISS_STAT_ACT(intf),
((mcam->rx_miss_act_cntr >> 9) << 12) |
BIT_ULL(9) | mcam->rx_miss_act_cntr);
hw->npc_stat_ena | mcam->rx_miss_act_cntr);
}
/* Configure TX interfaces */
......@@ -2147,18 +2170,16 @@ static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 entry, u16 cntr)
{
u16 index = entry & (mcam->banksize - 1);
u16 bank = npc_get_bank(mcam, entry);
u32 bank = npc_get_bank(mcam, entry);
struct rvu_hwinfo *hw = rvu->hw;
/* Set mapping and increment counter's refcnt */
mcam->entry2cntr_map[entry] = cntr;
mcam->cntr_refcnt[cntr]++;
/* Enable stats
* NPC_AF_MCAMEX_BANKX_STAT_ACT[14:12] - counter[11:9]
* NPC_AF_MCAMEX_BANKX_STAT_ACT[8:0] - counter[8:0]
*/
/* Enable stats */
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
((cntr >> 9) << 12) | BIT_ULL(9) | cntr);
((cntr >> 9) << 12) | hw->npc_stat_ena | cntr);
}
static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
......@@ -2414,6 +2435,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
goto alloc;
}
/* For a VF, the base MCAM match rule is set by its PF. All the
 * further MCAM rules installed by the VF on its own are
 * concatenated with that base rule. Hence PF entries should be
 * at lower priority than VF entries; otherwise the base rule
 * is always hit and the rules installed by the VF are of no
 * use. So if the request is from a PF and NOT a priority
 * allocation request, then allocate low priority entries.
 */
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
goto lprio_alloc;
/* Find out the search range for non-priority allocation request
*
* Get MCAM free entry count in middle zone.
......@@ -2439,6 +2471,7 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
/* Not enough free entries, search all entries in reverse,
* so that low priority ones will get used up.
*/
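/* Non-priority allocation requests from PFs jump straight here
 * (see the lprio_alloc goto above) so that PF entries are carved
 * from the low priority end of the MCAM.
 */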
lprio_alloc:
reverse = true;
start = 0;
end = mcam->bmap_entries;
......@@ -3252,7 +3285,7 @@ int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu,
/* read MCAM entry STAT_ACT register */
regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank));
if (!(regval & BIT_ULL(9))) {
if (!(regval & rvu->hw->npc_stat_ena)) {
rsp->stat_ena = 0;
mutex_unlock(&mcam->lock);
return 0;
......
......@@ -600,7 +600,7 @@ static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
dev_info(rvu->dev, "Unsupported flow(s):\n");
for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
return NIX_AF_ERR_NPC_KEY_NOT_SUPP;
return -EOPNOTSUPP;
}
return 0;
......@@ -995,13 +995,11 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_npc_mcam_rule dummy = { 0 };
struct rvu_npc_mcam_rule *rule;
bool new = false, msg_from_vf;
u16 owner = req->hdr.pcifunc;
struct msg_rsp write_rsp;
struct mcam_entry *entry;
int entry_index, err;
msg_from_vf = !!(owner & RVU_PFVF_FUNC_MASK);
bool new = false;
installed_features = req->features;
features = req->features;
......@@ -1027,7 +1025,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
}
/* update mcam entry with default unicast rule attributes */
if (def_ucast_rule && (msg_from_vf || (req->default_rule && req->append))) {
if (def_ucast_rule && (req->default_rule && req->append)) {
missing_features = (def_ucast_rule->features ^ features) &
def_ucast_rule->features;
if (missing_features)
......@@ -1130,6 +1128,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
struct npc_install_flow_rsp *rsp)
{
bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
struct rvu_switch *rswitch = &rvu->rswitch;
int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
bool pf_set_vfs_mac = false;
......@@ -1139,14 +1138,14 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0) {
dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
return -ENODEV;
return NPC_MCAM_INVALID_REQ;
}
if (!is_npc_interface_valid(rvu, req->intf))
return -EINVAL;
return NPC_FLOW_INTF_INVALID;
if (from_vf && req->default_rule)
return NPC_MCAM_PERM_DENIED;
return NPC_FLOW_VF_PERM_DENIED;
/* Each PF/VF info is maintained in struct rvu_pfvf.
* rvu_pfvf for the target PF/VF needs to be retrieved
......@@ -1172,12 +1171,12 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
err = npc_check_unsupported_flows(rvu, req->features, req->intf);
if (err)
return err;
return NPC_FLOW_NOT_SUPPORTED;
/* Skip channel validation if AF is installing */
if (!is_pffunc_af(req->hdr.pcifunc) &&
npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
return -EINVAL;
return NPC_FLOW_CHAN_INVALID;
pfvf = rvu_get_pfvf(rvu, target);
......@@ -1195,7 +1194,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
/* Proceed if NIXLF is attached or not for TX rules */
err = nix_get_nixlf(rvu, target, &nixlf, NULL);
if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac)
return -EINVAL;
return NPC_FLOW_NO_NIXLF;
/* don't enable rule when nixlf not attached or initialized */
if (!(is_nixlf_attached(rvu, target) &&
......@@ -1211,7 +1210,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
/* Do not allow requests from uninitialized VFs */
if (from_vf && !enable)
return -EINVAL;
return NPC_FLOW_VF_NOT_INIT;
/* PF sets VF mac & VF NIXLF is not attached, update the mac addr */
if (pf_set_vfs_mac && !enable) {
......@@ -1221,15 +1220,12 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
return 0;
}
/* If message is from VF then its flow should not overlap with
* reserved unicast flow.
*/
if (from_vf && pfvf->def_ucast_rule && is_npc_intf_rx(req->intf) &&
pfvf->def_ucast_rule->features & req->features)
return -EINVAL;
mutex_lock(&rswitch->switch_lock);
err = npc_install_flow(rvu, blkaddr, target, nixlf, pfvf,
req, rsp, enable, pf_set_vfs_mac);
mutex_unlock(&rswitch->switch_lock);
return npc_install_flow(rvu, blkaddr, target, nixlf, pfvf, req, rsp,
enable, pf_set_vfs_mac);
return err;
}
static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
......
......@@ -7,7 +7,8 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o
rvu_nicvf-y := otx2_vf.o
otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
otx2_devlink.o
rvu_nicvf-y := otx2_vf.o otx2_devlink.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
......@@ -19,11 +19,13 @@
#include <linux/timecounter.h>
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
#include <mbox.h>
#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include "otx2_devlink.h"
#include <rvu_trace.h>
/* PCI device IDs */
......@@ -268,7 +270,6 @@ struct otx2_mac_table {
};
struct otx2_flow_config {
u16 entry[NPC_MAX_NONCONTIG_ENTRIES];
u16 *flow_ent;
u16 *def_ent;
u16 nr_flows;
......@@ -279,16 +280,13 @@ struct otx2_flow_config {
#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \
OTX2_MAX_UNICAST_FLOWS + \
OTX2_MAX_VLAN_FLOWS)
u16 ntuple_offset;
u16 unicast_offset;
u16 rx_vlan_offset;
u16 vf_vlan_offset;
#define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX 0
#define OTX2_VF_VLAN_TX_INDEX 1
u16 tc_flower_offset;
u16 ntuple_max_flows;
u16 tc_max_flows;
u16 max_flows;
u8 dmacflt_max_flows;
u8 *bmap_to_dmacindex;
unsigned long dmacflt_bmap;
......@@ -299,8 +297,7 @@ struct otx2_tc_info {
/* hash table to store TC offloaded flows */
struct rhashtable flow_table;
struct rhashtable_params flow_ht_params;
DECLARE_BITMAP(tc_entries_bitmap, OTX2_MAX_TC_FLOWS);
unsigned long num_entries;
unsigned long *tc_entries_bitmap;
};
struct dev_hw_ops {
......@@ -353,6 +350,11 @@ struct otx2_nic {
struct otx2_vf_config *vf_configs;
struct cgx_link_user_info linfo;
/* NPC MCAM */
struct otx2_flow_config *flow_cfg;
struct otx2_mac_table *mac_table;
struct otx2_tc_info tc_info;
u64 reset_count;
struct work_struct reset_task;
struct workqueue_struct *flr_wq;
......@@ -360,7 +362,6 @@ struct otx2_nic {
struct refill_work *refill_wrk;
struct workqueue_struct *otx2_wq;
struct work_struct rx_mode_work;
struct otx2_mac_table *mac_table;
/* Ethtool stuff */
u32 msg_enable;
......@@ -376,9 +377,10 @@ struct otx2_nic {
struct otx2_ptp *ptp;
struct hwtstamp_config tstamp;
struct otx2_flow_config *flow_cfg;
struct otx2_tc_info tc_info;
unsigned long rq_bmap;
/* Devlink */
struct otx2_devlink *dl;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
......@@ -710,6 +712,11 @@ MBOX_UP_CGX_MESSAGES
#define RVU_PFVF_FUNC_SHIFT 0
#define RVU_PFVF_FUNC_MASK 0x3FF
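/* FUNC field of pcifunc is 0 for a PF and non-zero for its VFs */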
static inline bool is_otx2_vf(u16 pcifunc)
{
return !!(pcifunc & RVU_PFVF_FUNC_MASK);
}
static inline int rvu_get_pf(u16 pcifunc)
{
return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
......@@ -815,7 +822,8 @@ int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count);
void otx2_mcam_flow_del(struct otx2_nic *pf);
int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
......@@ -828,6 +836,7 @@ int otx2_add_flow(struct otx2_nic *pfvf,
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
struct npc_install_flow_req *req);
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg);
void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
......@@ -839,6 +848,7 @@ int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
......
// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU PF/VF Netdev Devlink
*
* Copyright (C) 2021 Marvell.
*/
#include "otx2_common.h"
/* Devlink Params APIs */
static int otx2_dl_mcam_count_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
struct otx2_flow_config *flow_cfg;
if (!pfvf->flow_cfg) {
NL_SET_ERR_MSG_MOD(extack,
"pfvf->flow_cfg not initialized");
return -EINVAL;
}
flow_cfg = pfvf->flow_cfg;
if (flow_cfg && flow_cfg->nr_flows) {
NL_SET_ERR_MSG_MOD(extack,
"Cannot modify count when there are active rules");
return -EINVAL;
}
return 0;
}
static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
if (!pfvf->flow_cfg)
return 0;
otx2_alloc_mcam_entries(pfvf, ctx->val.vu16);
otx2_tc_alloc_ent_bitmap(pfvf);
return 0;
}
static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
struct otx2_flow_config *flow_cfg;
if (!pfvf->flow_cfg) {
ctx->val.vu16 = 0;
return 0;
}
flow_cfg = pfvf->flow_cfg;
ctx->val.vu16 = flow_cfg->max_flows;
return 0;
}
enum otx2_dl_param_id {
OTX2_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
OTX2_DEVLINK_PARAM_ID_MCAM_COUNT,
};
static const struct devlink_param otx2_dl_params[] = {
DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_MCAM_COUNT,
"mcam_count", DEVLINK_PARAM_TYPE_U16,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
otx2_dl_mcam_count_get, otx2_dl_mcam_count_set,
otx2_dl_mcam_count_validate),
};
/* Devlink OPs */
static int otx2_devlink_info_get(struct devlink *devlink,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
if (is_otx2_vf(pfvf->pcifunc))
return devlink_info_driver_name_put(req, "rvu_nicvf");
return devlink_info_driver_name_put(req, "rvu_nicpf");
}
static const struct devlink_ops otx2_devlink_ops = {
.info_get = otx2_devlink_info_get,
};
int otx2_register_dl(struct otx2_nic *pfvf)
{
struct otx2_devlink *otx2_dl;
struct devlink *dl;
int err;
dl = devlink_alloc(&otx2_devlink_ops,
sizeof(struct otx2_devlink), pfvf->dev);
if (!dl) {
dev_warn(pfvf->dev, "devlink_alloc failed\n");
return -ENOMEM;
}
err = devlink_register(dl);
if (err) {
dev_err(pfvf->dev, "devlink register failed with error %d\n", err);
devlink_free(dl);
return err;
}
otx2_dl = devlink_priv(dl);
otx2_dl->dl = dl;
otx2_dl->pfvf = pfvf;
pfvf->dl = otx2_dl;
err = devlink_params_register(dl, otx2_dl_params,
ARRAY_SIZE(otx2_dl_params));
if (err) {
dev_err(pfvf->dev,
"devlink params register failed with error %d", err);
goto err_dl;
}
devlink_params_publish(dl);
return 0;
err_dl:
devlink_unregister(dl);
devlink_free(dl);
return err;
}
void otx2_unregister_dl(struct otx2_nic *pfvf)
{
struct otx2_devlink *otx2_dl = pfvf->dl;
struct devlink *dl;
if (!otx2_dl || !otx2_dl->dl)
return;
dl = otx2_dl->dl;
devlink_params_unregister(dl, otx2_dl_params,
ARRAY_SIZE(otx2_dl_params));
devlink_unregister(dl);
devlink_free(dl);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell RVU PF/VF Netdev Devlink
*
* Copyright (C) 2021 Marvell.
*
*/
#ifndef OTX2_DEVLINK_H
#define OTX2_DEVLINK_H
struct otx2_devlink {
struct devlink *dl;
struct otx2_nic *pfvf;
};
/* Devlink APIs */
int otx2_register_dl(struct otx2_nic *pfvf);
void otx2_unregister_dl(struct otx2_nic *pfvf);
#endif /* OTX2_DEVLINK_H */
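
With the registration above, the param surfaces through the
standard devlink interface. A sketch of the expected interaction,
assuming a device at pci/0002:02:00.0 (the address and the value
shown are illustrative):

  devlink dev param show pci/0002:02:00.0 name mcam_count
  pci/0002:02:00.0:
    name mcam_count type driver-specific
      values:
        cmode runtime value 16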
......@@ -645,6 +645,7 @@ static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
static int otx2_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *nfc, u32 *rules)
{
bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
......@@ -654,13 +655,17 @@ static int otx2_get_rxnfc(struct net_device *dev,
ret = 0;
break;
case ETHTOOL_GRXCLSRLCNT:
if (netif_running(dev) && ntuple) {
nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
ret = 0;
}
break;
case ETHTOOL_GRXCLSRULE:
if (netif_running(dev) && ntuple)
ret = otx2_get_flow(pfvf, nfc, nfc->fs.location);
break;
case ETHTOOL_GRXCLSRLALL:
if (netif_running(dev) && ntuple)
ret = otx2_get_all_flows(pfvf, nfc, rules);
break;
case ETHTOOL_GRXFH:
......@@ -696,41 +701,6 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
return ret;
}
static int otx2vf_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *nfc, u32 *rules)
{
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
switch (nfc->cmd) {
case ETHTOOL_GRXRINGS:
nfc->data = pfvf->hw.rx_queues;
ret = 0;
break;
case ETHTOOL_GRXFH:
return otx2_get_rss_hash_opts(pfvf, nfc);
default:
break;
}
return ret;
}
static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
{
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
switch (nfc->cmd) {
case ETHTOOL_SRXFH:
ret = otx2_set_rss_hash_opts(pfvf, nfc);
break;
default:
break;
}
return ret;
}
static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
......@@ -1357,8 +1327,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_sset_count = otx2vf_get_sset_count,
.set_channels = otx2_set_channels,
.get_channels = otx2_get_channels,
.get_rxnfc = otx2vf_get_rxnfc,
.set_rxnfc = otx2vf_set_rxnfc,
.get_rxnfc = otx2_get_rxnfc,
.set_rxnfc = otx2_set_rxnfc,
.get_rxfh_key_size = otx2_get_rxfh_key_size,
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
......
......@@ -5,11 +5,14 @@
*/
#include <net/ipv6.h>
#include <linux/sort.h>
#include "otx2_common.h"
#define OTX2_DEFAULT_ACTION 0x1
static int otx2_mcam_entry_init(struct otx2_nic *pfvf);
struct otx2_flow {
struct ethtool_rx_flow_spec flow_spec;
struct list_head list;
......@@ -30,8 +33,7 @@ static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_
{
devm_kfree(pfvf->dev, flow_cfg->flow_ent);
flow_cfg->flow_ent = NULL;
flow_cfg->ntuple_max_flows = 0;
flow_cfg->tc_max_flows = 0;
flow_cfg->max_flows = 0;
}
static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
......@@ -40,11 +42,11 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
struct npc_mcam_free_entry_req *req;
int ent, err;
if (!flow_cfg->ntuple_max_flows)
if (!flow_cfg->max_flows)
return 0;
mutex_lock(&pfvf->mbox.lock);
for (ent = 0; ent < flow_cfg->ntuple_max_flows; ent++) {
for (ent = 0; ent < flow_cfg->max_flows; ent++) {
req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
if (!req)
break;
......@@ -61,7 +63,12 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
return 0;
}
static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
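/* sort() comparator: orders MCAM entry indices in ascending order */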
static int mcam_entry_cmp(const void *a, const void *b)
{
return *(u16 *)a - *(u16 *)b;
}
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct npc_mcam_alloc_entry_req *req;
......@@ -76,8 +83,12 @@ static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
sizeof(u16), GFP_KERNEL);
if (!flow_cfg->flow_ent)
if (!flow_cfg->flow_ent) {
netdev_err(pfvf->netdev,
"%s: Unable to allocate memory for flow entries\n",
__func__);
return -ENOMEM;
}
mutex_lock(&pfvf->mbox.lock);
......@@ -92,8 +103,14 @@ static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
req->contig = false;
req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
/* Allocate higher priority entries for PFs, so that VF entries
 * will be on top of PF entries.
 */
if (!is_otx2_vf(pfvf->pcifunc)) {
req->priority = NPC_MCAM_HIGHER_PRIO;
req->ref_entry = flow_cfg->def_ent[0];
}
/* Send message to AF */
if (otx2_sync_mbox_msg(&pfvf->mbox))
......@@ -114,22 +131,34 @@ static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
break;
}
/* Multiple MCAM entry alloc requests could result in non-sequential
* MCAM entries in the flow_ent[] array. Sort them in ascending order;
* otherwise the user-installed ntuple filter index and the MCAM entry
* index will not be in sync.
*/
if (allocated)
sort(&flow_cfg->flow_ent[0], allocated,
sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);
exit:
mutex_unlock(&pfvf->mbox.lock);
flow_cfg->ntuple_offset = 0;
flow_cfg->ntuple_max_flows = allocated;
flow_cfg->tc_max_flows = allocated;
flow_cfg->max_flows = allocated;
if (allocated) {
pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
}
if (allocated != count)
netdev_info(pfvf->netdev,
"Unable to allocate %d MCAM entries for ntuple, got %d\n",
"Unable to allocate %d MCAM entries, got only %d\n",
count, allocated);
return allocated;
}
EXPORT_SYMBOL(otx2_alloc_mcam_entries);
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct npc_mcam_alloc_entry_req *req;
......@@ -189,18 +218,35 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
mutex_unlock(&pfvf->mbox.lock);
/* Allocate entries for Ntuple filters */
count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
if (count <= 0) {
otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
return 0;
}
pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
return 0;
}
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg;
pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
sizeof(struct otx2_flow_config),
GFP_KERNEL);
if (!pfvf->flow_cfg)
return -ENOMEM;
flow_cfg = pfvf->flow_cfg;
INIT_LIST_HEAD(&flow_cfg->flow_list);
flow_cfg->max_flows = 0;
return 0;
}
EXPORT_SYMBOL(otx2vf_mcam_flow_init);
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
int err;
......@@ -212,7 +258,10 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
err = otx2_alloc_mcam_entries(pf);
/* Allocate bare minimum number of MCAM entries needed for
* unicast and ntuple filters.
*/
err = otx2_mcam_entry_init(pf);
if (err)
return err;
......@@ -248,6 +297,7 @@ void otx2_mcam_flow_del(struct otx2_nic *pf)
{
otx2_destroy_mcam_flows(pf);
}
EXPORT_SYMBOL(otx2_mcam_flow_del);
/* On success, adds an mcam entry
 * On failure, enables promiscuous mode
......@@ -379,15 +429,19 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
list_add(&flow->list, head);
}
static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
{
if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows ||
if (!flow_cfg)
return 0;
if (flow_cfg->nr_flows == flow_cfg->max_flows ||
bitmap_weight(&flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows))
return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows;
return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
else
return flow_cfg->ntuple_max_flows;
return flow_cfg->max_flows;
}
EXPORT_SYMBOL(otx2_get_maxflows);
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
u32 location)
......@@ -732,7 +786,7 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
ether_addr_copy(pmask->dmac, eth_mask->h_dest);
req->features |= BIT_ULL(NPC_DMAC);
}
if (eth_mask->h_proto) {
if (eth_hdr->h_proto) {
memcpy(&pkt->etype, &eth_hdr->h_proto,
sizeof(pkt->etype));
memcpy(&pmask->etype, &eth_mask->h_proto,
......@@ -894,7 +948,7 @@ static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
pf_mac->entry = 0;
pf_mac->dmac_filter = true;
pf_mac->location = pfvf->flow_cfg->ntuple_max_flows;
pf_mac->location = pfvf->flow_cfg->max_flows;
memcpy(&pf_mac->flow_spec, &flow->flow_spec,
sizeof(struct ethtool_rx_flow_spec));
pf_mac->flow_spec.location = pf_mac->location;
......@@ -975,7 +1029,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
flow->dmac_filter = true;
flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows);
fsp->location = flow_cfg->ntuple_max_flows + flow->entry;
fsp->location = flow_cfg->max_flows + flow->entry;
flow->flow_spec.location = fsp->location;
flow->location = fsp->location;
......@@ -983,11 +1037,11 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
} else {
if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) {
if (flow->location >= pfvf->flow_cfg->max_flows) {
netdev_warn(pfvf->netdev,
"Can't insert non dmac ntuple rule at %d, allowed range %d-0\n",
flow->location,
flow_cfg->ntuple_max_flows - 1);
flow_cfg->max_flows - 1);
err = -EINVAL;
} else {
flow->entry = flow_cfg->flow_ent[flow->location];
......@@ -996,6 +1050,8 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
}
if (err) {
if (err == MBOX_MSG_INVALID)
err = -EINVAL;
if (new)
kfree(flow);
return err;
......@@ -1140,7 +1196,7 @@ int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
}
req->start = flow_cfg->flow_ent[0];
req->end = flow_cfg->flow_ent[flow_cfg->ntuple_max_flows - 1];
req->end = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
......
......@@ -1787,17 +1787,10 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
static netdev_features_t otx2_fix_features(struct net_device *dev,
netdev_features_t features)
{
/* check if n-tuple filters are ON */
if ((features & NETIF_F_HW_TC) && (dev->features & NETIF_F_NTUPLE)) {
netdev_info(dev, "Disabling n-tuple filters\n");
features &= ~NETIF_F_NTUPLE;
}
/* check if tc hw offload is ON */
if ((features & NETIF_F_NTUPLE) && (dev->features & NETIF_F_HW_TC)) {
netdev_info(dev, "Disabling TC hardware offload\n");
features &= ~NETIF_F_HW_TC;
}
if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_STAG_RX;
else
features &= ~NETIF_F_HW_VLAN_STAG_RX;
return features;
}
......@@ -1854,6 +1847,7 @@ static int otx2_set_features(struct net_device *netdev,
netdev_features_t changed = features ^ netdev->features;
bool ntuple = !!(features & NETIF_F_NTUPLE);
struct otx2_nic *pf = netdev_priv(netdev);
bool tc = !!(features & NETIF_F_HW_TC);
if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
return otx2_cgx_config_loopback(pf,
......@@ -1866,12 +1860,26 @@ static int otx2_set_features(struct net_device *netdev,
if ((changed & NETIF_F_NTUPLE) && !ntuple)
otx2_destroy_ntuple_flows(pf);
if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
pf->tc_info.num_entries) {
if ((changed & NETIF_F_HW_TC) && !tc &&
pf->flow_cfg && pf->flow_cfg->nr_flows) {
netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
return -EBUSY;
}
if ((changed & NETIF_F_NTUPLE) && ntuple &&
(netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
netdev_err(netdev,
"Can't enable NTUPLE when TC is active, disable TC and retry\n");
return -EINVAL;
}
if ((changed & NETIF_F_HW_TC) && tc &&
(netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
netdev_err(netdev,
"Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
return -EINVAL;
}
return 0;
}
......@@ -2569,8 +2577,6 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
NETIF_F_GSO_UDP_L4);
netdev->features |= netdev->hw_features;
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
err = otx2_mcam_flow_init(pf);
if (err)
goto err_ptp_destroy;
......@@ -2594,12 +2600,13 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
netdev->hw_features |= NETIF_F_HW_TC;
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
/* MTU range: 64 - 9190 */
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(pf);
......@@ -2619,6 +2626,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_mcam_flow_del;
err = otx2_register_dl(pf);
if (err)
goto err_mcam_flow_del;
/* Initialize SR-IOV resources */
err = otx2_sriov_vfcfg_init(pf);
if (err)
......@@ -2776,6 +2787,7 @@ static void otx2_remove(struct pci_dev *pdev)
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
otx2_unregister_dl(pf);
unregister_netdev(netdev);
otx2_sriov_disable(pf->pdev);
otx2_sriov_vfcfg_cleanup(pf);
......
......@@ -52,6 +52,29 @@ struct otx2_tc_flow {
bool is_act_police;
};
int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
if (!nic->flow_cfg->max_flows || is_otx2_vf(nic->pcifunc))
return 0;
/* Max flows changed, free the existing bitmap */
kfree(tc->tc_entries_bitmap);
tc->tc_entries_bitmap =
kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows),
sizeof(long), GFP_KERNEL);
if (!tc->tc_entries_bitmap) {
netdev_err(nic->netdev,
"Unable to alloc TC flow entries bitmap\n");
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
u32 *burst_mantissa)
{
......@@ -596,6 +619,7 @@ static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
static int otx2_tc_del_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
struct otx2_flow_config *flow_cfg = nic->flow_cfg;
struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *flow_node;
int err;
......@@ -638,7 +662,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
kfree_rcu(flow_node, rcu);
clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap);
tc_info->num_entries--;
flow_cfg->nr_flows--;
return 0;
}
......@@ -647,6 +671,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
struct otx2_flow_config *flow_cfg = nic->flow_cfg;
struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *new_node, *old_node;
struct npc_install_flow_req *req, dummy;
......@@ -655,9 +680,9 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
return -ENOMEM;
if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) {
NL_SET_ERR_MSG_MOD(extack,
"Not enough MCAM space to add the flow");
"Free MCAM entry not available to add the flow");
return -ENOMEM;
}
......@@ -695,10 +720,9 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
nic->flow_cfg->tc_max_flows);
flow_cfg->max_flows);
req->channel = nic->hw.rx_chan_base;
req->entry = nic->flow_cfg->flow_ent[nic->flow_cfg->tc_flower_offset +
nic->flow_cfg->tc_max_flows - new_node->bitpos];
req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1];
req->intf = NIX_INTF_RX;
req->set_cntr = 1;
new_node->entry = req->entry;
......@@ -723,7 +747,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
}
set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
tc_info->num_entries++;
flow_cfg->nr_flows++;
return 0;
......@@ -1008,10 +1032,21 @@ static const struct rhashtable_params tc_flow_ht_params = {
int otx2_init_tc(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
int err;
/* Exclude receive queue 0 from being used for the police action */
set_bit(0, &nic->rq_bmap);
if (!nic->flow_cfg) {
netdev_err(nic->netdev,
"Can't init TC, nic->flow_cfg is not setup\n");
return -EINVAL;
}
err = otx2_tc_alloc_ent_bitmap(nic);
if (err)
return err;
tc->flow_ht_params = tc_flow_ht_params;
return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
}
......@@ -1020,5 +1055,6 @@ void otx2_shutdown_tc(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
kfree(tc->tc_entries_bitmap);
rhashtable_destroy(&tc->flow_table);
}
......@@ -464,6 +464,28 @@ static void otx2vf_reset_task(struct work_struct *work)
rtnl_unlock();
}
static int otx2vf_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t changed = features ^ netdev->features;
bool ntuple_enabled = !!(features & NETIF_F_NTUPLE);
struct otx2_nic *vf = netdev_priv(netdev);
if (changed & NETIF_F_NTUPLE) {
if (!ntuple_enabled) {
otx2_mcam_flow_del(vf);
return 0;
}
if (!otx2_get_maxflows(vf->flow_cfg)) {
netdev_err(netdev,
"Can't enable NTUPLE, MCAM entries not allocated\n");
return -EINVAL;
}
}
return 0;
}
static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_open = otx2vf_open,
.ndo_stop = otx2vf_stop,
......@@ -471,6 +493,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_set_rx_mode = otx2vf_set_rx_mode,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2vf_change_mtu,
.ndo_set_features = otx2vf_set_features,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
};
......@@ -627,12 +650,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
NETIF_F_HW_VLAN_STAG_TX;
netdev->features |= netdev->hw_features;
netdev->hw_features |= NETIF_F_NTUPLE;
netdev->hw_features |= NETIF_F_RXALL;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2vf_netdev_ops;
/* MTU range: 64 - 9190 */
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(vf);
......@@ -658,6 +683,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
otx2vf_set_ethtool_ops(netdev);
err = otx2vf_mcam_flow_init(vf);
if (err)
goto err_unreg_netdev;
err = otx2_register_dl(vf);
if (err)
goto err_unreg_netdev;
/* Enable pause frames by default */
vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
......@@ -695,6 +728,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
cancel_work_sync(&vf->reset_task);
otx2_unregister_dl(vf);
unregister_netdev(netdev);
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
......