Commit 798a496b authored by David S. Miller

Merge branch 'octeontx2-af-Debugfs-support-and-updates-to-parser-profile'

Sunil Goutham says:

====================
octeontx2-af: Debugfs support and updates to parser profile

This patchset adds debugfs support to dump various HW state machine info,
which helps in debugging issues. The info includes
- Current queue context, stats, resource utilization etc.
- MCAM entry utilization, miss and pkt drop counters
- CGX ingress and egress stats
- Current RVU block allocation status
- etc.

The rest of the patches have changes w.r.t.
- Updated packet parsing profile for parsing more protocols.
- RSS algorithms to include inner protocols while generating hash.
- Handling the current silicon version's limitations w.r.t. shaping, coloring
  and the fixed mapping of transmit limiter queues' configuration.
- Enabling broadcast packet replication to a PF and its VFs.
- Support for a configurable NDC cache waymask.
- etc.

Changes from v1:
Removed the inline keyword for newly introduced APIs in a few patches.
   - Suggested by David Miller.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b37fa92e a7faa68b
......@@ -16,3 +16,12 @@ config OCTEONTX2_AF
Unit's admin function manager which manages all RVU HW resources
and provides a medium to other PF/VFs to configure HW. Should be
enabled for other RVU device drivers to work.
config NDC_DIS_DYNAMIC_CACHING
bool "Disable caching of dynamic entries in NDC"
depends on OCTEONTX2_AF
default n
---help---
This config option disables caching of dynamic entries, such as NIX SQEs
and NPA stack pages, in NDC. It also locks down NIX SQ/CQ/RQ/RSS and
NPA Aura/Pool contexts.
......@@ -8,4 +8,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
octeontx2_mbox-y := mbox.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o
rvu_reg.o rvu_npc.o rvu_debugfs.o
......@@ -138,6 +138,16 @@ void *cgx_get_pdata(int cgx_id)
}
EXPORT_SYMBOL(cgx_get_pdata);
int cgx_get_cgxid(void *cgxd)
{
struct cgx *cgx = cgxd;
if (!cgx)
return -EINVAL;
return cgx->cgx_id;
}
/* Ensure the required lock for the event queue (where asynchronous events are
* posted) is acquired before calling this API. Else an asynchronous event (with
* the latest link status) can reach the destination before this function returns
......@@ -281,6 +291,35 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
}
EXPORT_SYMBOL(cgx_lmac_promisc_config);
/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
u64 cfg;
if (!cgx)
return;
if (enable) {
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
} else {
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
}
}
EXPORT_SYMBOL(cgx_lmac_enadis_rx_pause_fwding);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
struct cgx *cgx = cgxd;
......@@ -321,6 +360,27 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
}
EXPORT_SYMBOL(cgx_lmac_rx_tx_enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
u64 cfg, last;
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
last = cfg;
if (enable)
cfg |= DATA_PKT_TX_EN;
else
cfg &= ~DATA_PKT_TX_EN;
if (cfg != last)
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return !!(last & DATA_PKT_TX_EN);
}
EXPORT_SYMBOL(cgx_lmac_tx_enable);
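Note: cgx_lmac_tx_enable() returns the previous state of DATA_PKT_TX_EN (or -ENODEV), so a caller can pause TX around a reconfiguration and then restore whatever state the LMAC had. A minimal sketch, assuming cgxd/lmac_id come from the caller and reconfigure_lmac() is a placeholder, not part of this patch:
/* Sketch only: temporarily stop TX on an LMAC, then restore its old state */
static int lmac_reconfig_with_tx_paused(void *cgxd, int lmac_id)
{
	int prev, err;
	prev = cgx_lmac_tx_enable(cgxd, lmac_id, false);
	if (prev < 0)
		return prev;		/* -ENODEV on invalid cgxd/lmac_id */
	err = reconfigure_lmac(cgxd, lmac_id);	/* placeholder step */
	/* Restore the TX state the LMAC had before */
	cgx_lmac_tx_enable(cgxd, lmac_id, prev);
	return err;
}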
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
......
......@@ -56,6 +56,11 @@
#define CGXX_GMP_PCS_MRX_CTL 0x30000
#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
#define CGXX_SMUX_RX_FRM_CTL 0x20020
#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
#define CGX_CMD_TIMEOUT 2200 /* msecs */
......@@ -63,6 +68,11 @@
#define CGX_NVEC 37
#define CGX_LMAC_FWI 0
enum cgx_nix_stat_type {
NIX_STATS_RX,
NIX_STATS_TX,
};
enum LMAC_TYPE {
LMAC_MODE_SGMII = 0,
LMAC_MODE_XAUI = 1,
......@@ -96,6 +106,7 @@ struct cgx_event_cb {
extern struct pci_driver cgx_driver;
int cgx_get_cgxcnt_max(void);
int cgx_get_cgxid(void *cgxd);
int cgx_get_lmac_cnt(void *cgxd);
void *cgx_get_pdata(int cgx_id);
int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
......@@ -104,9 +115,11 @@ int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
......
......@@ -196,4 +196,20 @@ enum nix_scheduler {
#define DEFAULT_RSS_CONTEXT_GROUP 0
#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */
/* NDC info */
enum ndc_idx_e {
NIX0_RX = 0x0,
NIX0_TX = 0x1,
NPA0_U = 0x2,
};
enum ndc_ctype_e {
CACHING = 0x0,
BYPASS = 0x1,
};
#define NDC_MAX_PORT 6
#define NDC_READ_TRANS 0
#define NDC_WRITE_TRANS 1
#endif /* COMMON_H */
......@@ -19,17 +19,20 @@ static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
tx_hdr = mdev->mbase + mbox->tx_start;
rx_hdr = mdev->mbase + mbox->rx_start;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
spin_lock(&mdev->mbox_lock);
mdev->msg_size = 0;
mdev->rsp_size = 0;
tx_hdr->num_msgs = 0;
tx_hdr->msg_size = 0;
rx_hdr->num_msgs = 0;
rx_hdr->msg_size = 0;
spin_unlock(&mdev->mbox_lock);
}
EXPORT_SYMBOL(otx2_mbox_reset);
......@@ -133,16 +136,17 @@ EXPORT_SYMBOL(otx2_mbox_init);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
int timeout = 0, sleep = 1;
struct device *sender = &mbox->pdev->dev;
while (mdev->num_msgs != mdev->msgs_acked) {
msleep(sleep);
timeout += sleep;
if (timeout >= MBOX_RSP_TIMEOUT)
return -EIO;
while (!time_after(jiffies, timeout)) {
if (mdev->num_msgs == mdev->msgs_acked)
return 0;
usleep_range(800, 1000);
}
return 0;
dev_dbg(sender, "timed out while waiting for rsp\n");
return -EIO;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
......@@ -162,13 +166,25 @@ EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
tx_hdr = mdev->mbase + mbox->tx_start;
rx_hdr = mdev->mbase + mbox->rx_start;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
/* If a bounce buffer is in use, copy mbox messages from the
* bounce buffer to the hw mbox memory.
*/
if (mdev->mbase != hw_mbase)
memcpy(hw_mbase + mbox->tx_start + msgs_offset,
mdev->mbase + mbox->tx_start + msgs_offset,
mdev->msg_size);
spin_lock(&mdev->mbox_lock);
tx_hdr->msg_size = mdev->msg_size;
/* Reset header for next messages */
mdev->msg_size = 0;
mdev->rsp_size = 0;
......@@ -215,7 +231,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
/* Clear the whole msg region */
memset(msghdr, 0, sizeof(*msghdr) + size);
memset(msghdr, 0, size);
/* Init message header with reset values */
msghdr->ver = OTX2_MBOX_VERSION;
mdev->msg_size += size;
......@@ -236,8 +252,10 @@ struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
u16 msgs;
spin_lock(&mdev->mbox_lock);
if (mdev->num_msgs != mdev->msgs_acked)
return ERR_PTR(-ENODEV);
goto error;
for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
struct mbox_msghdr *pmsg = mdev->mbase + imsg;
......@@ -245,18 +263,55 @@ struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
if (msg == pmsg) {
if (pmsg->id != prsp->id)
return ERR_PTR(-ENODEV);
goto error;
spin_unlock(&mdev->mbox_lock);
return prsp;
}
imsg = pmsg->next_msgoff;
irsp = prsp->next_msgoff;
imsg = mbox->tx_start + pmsg->next_msgoff;
irsp = mbox->rx_start + prsp->next_msgoff;
}
error:
spin_unlock(&mdev->mbox_lock);
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL(otx2_mbox_get_rsp);
int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
{
unsigned long ireq = mbox->tx_start + msgs_offset;
unsigned long irsp = mbox->rx_start + msgs_offset;
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
int rc = -ENODEV;
u16 msgs;
spin_lock(&mdev->mbox_lock);
if (mdev->num_msgs != mdev->msgs_acked)
goto exit;
for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
struct mbox_msghdr *preq = mdev->mbase + ireq;
struct mbox_msghdr *prsp = mdev->mbase + irsp;
if (preq->id != prsp->id)
goto exit;
if (prsp->rc) {
rc = prsp->rc;
goto exit;
}
ireq = mbox->tx_start + preq->next_msgoff;
irsp = mbox->rx_start + prsp->next_msgoff;
}
rc = 0;
exit:
spin_unlock(&mdev->mbox_lock);
return rc;
}
EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);
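For context, the expected sender-side sequence with this helper is roughly: allocate a request, ring the doorbell, wait for the response, then let otx2_mbox_check_rsp_msgs() surface the first non-zero rsp->rc. A hedged sketch using only the APIs declared here; the request ID and msg_rsp sizing are illustrative, and the surrounding driver context is assumed:
/* Sketch, assuming 'mbox' is an initialized otx2_mbox and devid is 0 */
static int send_req_and_check(struct otx2_mbox *mbox, u16 msg_id)
{
	struct mbox_msghdr *req;
	int err;
	req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
				      sizeof(struct msg_rsp));
	if (!req)
		return -ENOMEM;
	req->id = msg_id;			/* one of the MBOX_MSG_* IDs */
	otx2_mbox_msg_send(mbox, 0);		/* copy bounce buffer, ring doorbell */
	err = otx2_mbox_wait_for_rsp(mbox, 0);	/* -EIO on timeout */
	if (err)
		return err;
	return otx2_mbox_check_rsp_msgs(mbox, 0); /* first rsp->rc, if any */
}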
int
otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
{
......
......@@ -36,7 +36,7 @@
#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
#define MBOX_RSP_TIMEOUT 1000 /* in ms, Time to wait for mbox response */
#define MBOX_RSP_TIMEOUT 2000 /* Time(ms) to wait for mbox response */
#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
......@@ -75,6 +75,7 @@ struct otx2_mbox {
/* Header which precedes all mbox messages */
struct mbox_hdr {
u64 msg_size; /* Total msgs size embedded */
u16 num_msgs; /* No of msgs embedded */
};
......@@ -103,6 +104,7 @@ struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
int size, int size_rsp);
struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *msg);
int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid);
int otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid,
u16 pcifunc, u16 id);
bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid);
......@@ -125,6 +127,7 @@ M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
......@@ -300,6 +303,12 @@ struct msix_offset_rsp {
u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
};
struct get_hw_cap_rsp {
struct mbox_msghdr hdr;
u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
u8 nix_shaping; /* Is shaping and coloring supported */
};
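The GET_HW_CAP response lets a requester discover silicon-dependent behaviour instead of hard-coding it; per rvu_setup_hw_capabilities(), fixed schq mapping and disabled shaping correspond to 96xx A0/B0 silicon. A hedged sketch of acting on the two fields (the helper names are placeholders, not part of this patch):
/* Sketch: adapt TX scheduler handling to the AF-reported capabilities */
static void apply_hw_caps(struct get_hw_cap_rsp *rsp)
{
	if (rsp->nix_fixed_txschq_mapping)
		use_fixed_txschq_layout();	/* placeholder: request exact schqs */
	else
		use_flexible_txschq_layout();	/* placeholder: let AF pick schqs */
	if (!rsp->nix_shaping)
		disable_tx_shaping();		/* placeholder: no shaping/coloring */
}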
/* CGX mbox message formats */
struct cgx_stats_rsp {
......@@ -352,6 +361,7 @@ struct npa_lf_alloc_req {
int node;
int aura_sz; /* No of auras */
u32 nr_pools; /* No of pools */
u64 way_mask;
};
struct npa_lf_alloc_rsp {
......@@ -442,6 +452,7 @@ struct nix_lf_alloc_req {
u16 npa_func;
u16 sso_func;
u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */
u64 way_mask;
};
struct nix_lf_alloc_rsp {
......@@ -512,6 +523,9 @@ struct nix_txsch_alloc_rsp {
/* Scheduler queue list allocated at each level */
u16 schq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u8 aggr_level; /* Traffic aggregation scheduler level */
u8 aggr_lvl_rr_prio; /* Aggregation lvl's RR_PRIO config */
u8 link_cfg_lvl; /* LINKX_CFG CSRs mapped to TL3 or TL2's index ? */
};
struct nix_txsch_free_req {
......@@ -578,6 +592,18 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_TCP BIT(3)
#define NIX_FLOW_KEY_TYPE_UDP BIT(4)
#define NIX_FLOW_KEY_TYPE_SCTP BIT(5)
#define NIX_FLOW_KEY_TYPE_NVGRE BIT(6)
#define NIX_FLOW_KEY_TYPE_VXLAN BIT(7)
#define NIX_FLOW_KEY_TYPE_GENEVE BIT(8)
#define NIX_FLOW_KEY_TYPE_ETH_DMAC BIT(9)
#define NIX_FLOW_KEY_TYPE_IPV6_EXT BIT(10)
#define NIX_FLOW_KEY_TYPE_GTPU BIT(11)
#define NIX_FLOW_KEY_TYPE_INNR_IPV4 BIT(12)
#define NIX_FLOW_KEY_TYPE_INNR_IPV6 BIT(13)
#define NIX_FLOW_KEY_TYPE_INNR_TCP BIT(14)
#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
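With the new key types, the RSS flow key can include inner headers of tunneled traffic, so e.g. VXLAN-encapsulated flows get spread by their inner IP/L4 fields rather than only the outer tuple. A hedged sketch of composing such a flowkey_cfg from the bits added above (it would be sent to the AF in a nix_rss_flowkey_cfg message, with group set to e.g. DEFAULT_RSS_CONTEXT_GROUP):
/* Sketch: hash VXLAN traffic on the outer UDP header plus inner IP/L4 */
u32 flowkey_cfg = NIX_FLOW_KEY_TYPE_UDP |	/* outer UDP (VXLAN transport) */
		  NIX_FLOW_KEY_TYPE_VXLAN |
		  NIX_FLOW_KEY_TYPE_INNR_IPV4 |
		  NIX_FLOW_KEY_TYPE_INNR_IPV6 |
		  NIX_FLOW_KEY_TYPE_INNR_TCP |
		  NIX_FLOW_KEY_TYPE_INNR_UDP;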
......
......@@ -27,26 +27,45 @@ enum NPC_LID_E {
enum npc_kpu_la_ltype {
NPC_LT_LA_8023 = 1,
NPC_LT_LA_ETHER,
NPC_LT_LA_IH_NIX_ETHER,
NPC_LT_LA_IH_8_ETHER,
NPC_LT_LA_IH_4_ETHER,
NPC_LT_LA_IH_2_ETHER,
NPC_LT_LA_HIGIG2_ETHER,
NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
NPC_LT_LA_CUSTOM0 = 0xE,
NPC_LT_LA_CUSTOM1 = 0xF,
};
enum npc_kpu_lb_ltype {
NPC_LT_LB_ETAG = 1,
NPC_LT_LB_CTAG,
NPC_LT_LB_STAG,
NPC_LT_LB_STAG_QINQ,
NPC_LT_LB_BTAG,
NPC_LT_LB_QINQ,
NPC_LT_LB_ITAG,
NPC_LT_LB_DSA,
NPC_LT_LB_DSA_VLAN,
NPC_LT_LB_EDSA,
NPC_LT_LB_EDSA_VLAN,
NPC_LT_LB_EXDSA,
NPC_LT_LB_EXDSA_VLAN,
NPC_LT_LB_CUSTOM0 = 0xE,
NPC_LT_LB_CUSTOM1 = 0xF,
};
enum npc_kpu_lc_ltype {
NPC_LT_LC_IP = 1,
NPC_LT_LC_IP_OPT,
NPC_LT_LC_IP6,
NPC_LT_LC_IP6_EXT,
NPC_LT_LC_ARP,
NPC_LT_LC_RARP,
NPC_LT_LC_MPLS,
NPC_LT_LC_NSH,
NPC_LT_LC_PTP,
NPC_LT_LC_FCOE,
NPC_LT_LC_CUSTOM0 = 0xE,
NPC_LT_LC_CUSTOM1 = 0xF,
};
/* Don't modify Ltypes up to SCTP, otherwise it will
......@@ -57,49 +76,67 @@ enum npc_kpu_ld_ltype {
NPC_LT_LD_UDP,
NPC_LT_LD_ICMP,
NPC_LT_LD_SCTP,
NPC_LT_LD_IGMP,
NPC_LT_LD_ICMP6,
NPC_LT_LD_IGMP = 8,
NPC_LT_LD_ESP,
NPC_LT_LD_AH,
NPC_LT_LD_GRE,
NPC_LT_LD_GRE_MPLS,
NPC_LT_LD_GRE_NSH,
NPC_LT_LD_TU_MPLS,
NPC_LT_LD_NVGRE,
NPC_LT_LD_NSH,
NPC_LT_LD_TU_MPLS_IN_NSH,
NPC_LT_LD_TU_MPLS_IN_IP,
NPC_LT_LD_CUSTOM0 = 0xE,
NPC_LT_LD_CUSTOM1 = 0xF,
};
enum npc_kpu_le_ltype {
NPC_LT_LE_TU_ETHER = 1,
NPC_LT_LE_TU_PPP,
NPC_LT_LE_TU_MPLS_IN_NSH,
NPC_LT_LE_TU_3RD_NSH,
NPC_LT_LE_VXLAN = 1,
NPC_LT_LE_GENEVE,
NPC_LT_LE_GTPU = 4,
NPC_LT_LE_VXLANGPE,
NPC_LT_LE_GTPC,
NPC_LT_LE_NSH,
NPC_LT_LE_TU_MPLS_IN_GRE,
NPC_LT_LE_TU_NSH_IN_GRE,
NPC_LT_LE_TU_MPLS_IN_UDP,
NPC_LT_LE_CUSTOM0 = 0xE,
NPC_LT_LE_CUSTOM1 = 0xF,
};
enum npc_kpu_lf_ltype {
NPC_LT_LF_TU_IP = 1,
NPC_LT_LF_TU_IP6,
NPC_LT_LF_TU_ARP,
NPC_LT_LF_TU_MPLS_IP,
NPC_LT_LF_TU_MPLS_IP6,
NPC_LT_LF_TU_MPLS_ETHER,
NPC_LT_LF_TU_ETHER = 1,
NPC_LT_LF_TU_PPP,
NPC_LT_LF_TU_MPLS_IN_VXLANGPE,
NPC_LT_LF_TU_NSH_IN_VXLANGPE,
NPC_LT_LF_TU_MPLS_IN_NSH,
NPC_LT_LF_TU_3RD_NSH,
NPC_LT_LF_CUSTOM0 = 0xE,
NPC_LT_LF_CUSTOM1 = 0xF,
};
enum npc_kpu_lg_ltype {
NPC_LT_LG_TU_TCP = 1,
NPC_LT_LG_TU_UDP,
NPC_LT_LG_TU_SCTP,
NPC_LT_LG_TU_ICMP,
NPC_LT_LG_TU_IGMP,
NPC_LT_LG_TU_ICMP6,
NPC_LT_LG_TU_ESP,
NPC_LT_LG_TU_AH,
NPC_LT_LG_TU_IP = 1,
NPC_LT_LG_TU_IP6,
NPC_LT_LG_TU_ARP,
NPC_LT_LG_TU_ETHER_IN_NSH,
NPC_LT_LG_CUSTOM0 = 0xE,
NPC_LT_LG_CUSTOM1 = 0xF,
};
/* Don't modify Ltypes up to SCTP, otherwise it will
* affect flow tag calculation and thus RSS.
*/
enum npc_kpu_lh_ltype {
NPC_LT_LH_TCP_DATA = 1,
NPC_LT_LH_HTTP_DATA,
NPC_LT_LH_HTTPS_DATA,
NPC_LT_LH_PPTP_DATA,
NPC_LT_LH_UDP_DATA,
NPC_LT_LH_TU_TCP = 1,
NPC_LT_LH_TU_UDP,
NPC_LT_LH_TU_ICMP,
NPC_LT_LH_TU_SCTP,
NPC_LT_LH_TU_ICMP6,
NPC_LT_LH_TU_IGMP = 8,
NPC_LT_LH_TU_ESP,
NPC_LT_LH_TU_AH,
NPC_LT_LH_CUSTOM0 = 0xE,
NPC_LT_LH_CUSTOM1 = 0xF,
};
struct npc_kpu_profile_cam {
......
(One source diff in this merge is too large to display and is omitted here.)
......@@ -56,12 +56,34 @@ static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
hw->cap.nix_tx_aggr_lvl = NIX_TXSCH_LVL_TL1;
hw->cap.nix_fixed_txschq_mapping = false;
hw->cap.nix_shaping = true;
hw->cap.nix_tx_link_bp = true;
hw->cap.nix_rx_multicast = true;
if (is_rvu_96xx_B0(rvu)) {
hw->cap.nix_fixed_txschq_mapping = true;
hw->cap.nix_txsch_per_cgx_lmac = 4;
hw->cap.nix_txsch_per_lbk_lmac = 132;
hw->cap.nix_txsch_per_sdp_lmac = 76;
hw->cap.nix_shaping = false;
hw->cap.nix_tx_link_bp = false;
if (is_rvu_96xx_A0(rvu))
hw->cap.nix_rx_multicast = false;
}
}
/* Poll a RVU block's register 'offset', for a 'zero'
* or 'nonzero' at bits specified by 'mask'
*/
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
unsigned long timeout = jiffies + usecs_to_jiffies(100);
unsigned long timeout = jiffies + usecs_to_jiffies(10000);
void __iomem *reg;
u64 reg_val;
......@@ -73,7 +95,6 @@ int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
if (!zero && (reg_val & mask))
return 0;
usleep_range(1, 5);
timeout--;
}
return -EBUSY;
}
......@@ -433,9 +454,9 @@ static void rvu_reset_all_blocks(struct rvu *rvu)
rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC0, NDC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC1, NDC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC2, NDC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}
static void rvu_scan_block(struct rvu *rvu, struct rvu_block *block)
......@@ -877,8 +898,8 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
return 0;
}
static int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
struct ready_msg_rsp *rsp)
int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
struct ready_msg_rsp *rsp)
{
return 0;
}
......@@ -1023,9 +1044,9 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
return 0;
}
static int rvu_mbox_handler_detach_resources(struct rvu *rvu,
struct rsrc_detach *detach,
struct msg_rsp *rsp)
int rvu_mbox_handler_detach_resources(struct rvu *rvu,
struct rsrc_detach *detach,
struct msg_rsp *rsp)
{
return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
......@@ -1171,9 +1192,9 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
return -ENOSPC;
}
static int rvu_mbox_handler_attach_resources(struct rvu *rvu,
struct rsrc_attach *attach,
struct msg_rsp *rsp)
int rvu_mbox_handler_attach_resources(struct rvu *rvu,
struct rsrc_attach *attach,
struct msg_rsp *rsp)
{
u16 pcifunc = attach->hdr.pcifunc;
int err;
......@@ -1294,8 +1315,8 @@ static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}
static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
struct msix_offset_rsp *rsp)
int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
struct msix_offset_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
......@@ -1343,8 +1364,8 @@ static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
return 0;
}
static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
u16 vf, numvfs;
......@@ -1363,6 +1384,17 @@ static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
return 0;
}
int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
struct get_hw_cap_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
rsp->nix_shaping = hw->cap.nix_shaping;
return 0;
}
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
......@@ -1440,12 +1472,12 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
/* Process received mbox messages */
req_hdr = mdev->mbase + mbox->rx_start;
if (req_hdr->num_msgs == 0)
if (mw->mbox_wrk[devid].num_msgs == 0)
return;
offset = mbox->rx_start + ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
for (id = 0; id < req_hdr->num_msgs; id++) {
for (id = 0; id < mw->mbox_wrk[devid].num_msgs; id++) {
msg = mdev->mbase + offset;
/* Set which PF/VF sent this message based on mbox IRQ */
......@@ -1471,13 +1503,14 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
err, otx2_mbox_id2name(msg->id),
msg->id, devid,
msg->id, rvu_get_pf(msg->pcifunc),
(msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
else
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
err, otx2_mbox_id2name(msg->id),
msg->id, devid);
}
mw->mbox_wrk[devid].num_msgs = 0;
/* Send mbox responses to VF/PF */
otx2_mbox_msg_send(mbox, devid);
......@@ -1523,14 +1556,14 @@ static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
mdev = &mbox->dev[devid];
rsp_hdr = mdev->mbase + mbox->rx_start;
if (rsp_hdr->num_msgs == 0) {
if (mw->mbox_wrk_up[devid].up_num_msgs == 0) {
dev_warn(rvu->dev, "mbox up handler: num_msgs = 0\n");
return;
}
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
for (id = 0; id < rsp_hdr->num_msgs; id++) {
for (id = 0; id < mw->mbox_wrk_up[devid].up_num_msgs; id++) {
msg = mdev->mbase + offset;
if (msg->id >= MBOX_MSG_MAX) {
......@@ -1560,6 +1593,7 @@ static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
offset = mbox->rx_start + msg->next_msgoff;
mdev->msgs_acked++;
}
mw->mbox_wrk_up[devid].up_num_msgs = 0;
otx2_mbox_reset(mbox, devid);
}
......@@ -1697,14 +1731,28 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first,
mbox = &mw->mbox;
mdev = &mbox->dev[i];
hdr = mdev->mbase + mbox->rx_start;
if (hdr->num_msgs)
queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
/* The hdr->num_msgs is set to zero immediately in the interrupt
* handler to ensure that it holds a correct value the next time
* the interrupt handler is called.
* mw->mbox_wrk[i].num_msgs holds the count for use in the
* mbox handler and mw->mbox_wrk_up[i].up_num_msgs holds the
* count for use in the mbox up handler.
*/
if (hdr->num_msgs) {
mw->mbox_wrk[i].num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
}
mbox = &mw->mbox_up;
mdev = &mbox->dev[i];
hdr = mdev->mbase + mbox->rx_start;
if (hdr->num_msgs)
if (hdr->num_msgs) {
mw->mbox_wrk_up[i].up_num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
}
}
}
......@@ -2316,18 +2364,6 @@ static int rvu_enable_sriov(struct rvu *rvu)
if (vfs > chans)
vfs = chans;
/* AF's VFs work in pairs and talk over consecutive loopback channels.
* Thus we want to enable maximum even number of VFs. In case
* odd number of VFs are available then the last VF on the list
* remains disabled.
*/
if (vfs & 0x1) {
dev_warn(&pdev->dev,
"Number of VFs should be even. Enabling %d out of %d.\n",
vfs - 1, vfs);
vfs--;
}
if (!vfs)
return 0;
......@@ -2432,6 +2468,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rvu_reset_all_blocks(rvu);
rvu_setup_hw_capabilities(rvu);
err = rvu_setup_hw_resources(rvu);
if (err)
goto err_release_regions;
......@@ -2456,6 +2494,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_irq;
/* Initialize debugfs */
rvu_dbg_init(rvu);
return 0;
err_irq:
rvu_unregister_interrupts(rvu);
......@@ -2482,6 +2523,7 @@ static void rvu_remove(struct pci_dev *pdev)
{
struct rvu *rvu = pci_get_drvdata(pdev);
rvu_dbg_exit(rvu);
rvu_unregister_interrupts(rvu);
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
......
......@@ -35,9 +35,36 @@
#define RVU_PFVF_FUNC_SHIFT 0
#define RVU_PFVF_FUNC_MASK 0x3FF
#ifdef CONFIG_DEBUG_FS
struct dump_ctx {
int lf;
int id;
bool all;
};
struct rvu_debugfs {
struct dentry *root;
struct dentry *cgx_root;
struct dentry *cgx;
struct dentry *lmac;
struct dentry *npa;
struct dentry *nix;
struct dentry *npc;
struct dump_ctx npa_aura_ctx;
struct dump_ctx npa_pool_ctx;
struct dump_ctx nix_cq_ctx;
struct dump_ctx nix_rq_ctx;
struct dump_ctx nix_sq_ctx;
int npa_qsize_id;
int nix_qsize_id;
};
#endif
struct rvu_work {
struct work_struct work;
struct rvu *rvu;
int num_msgs;
int up_num_msgs;
};
struct rsrc_bmap {
......@@ -99,6 +126,7 @@ struct npc_mcam {
u16 lprio_start;
u16 hprio_count;
u16 hprio_end;
u16 rx_miss_act_cntr; /* Counter for RX MISS action */
};
/* Structure for per RVU func info ie PF/VF */
......@@ -151,15 +179,20 @@ struct rvu_pfvf {
struct mcam_entry entry;
int rxvlan_index;
bool rxvlan;
bool cgx_in_use; /* this PF/VF using CGX? */
int cgx_users; /* number of cgx users - used only by PFs */
};
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
#define NIX_TXSCHQ_TL1_CFG_DONE BIT_ULL(0)
#define NIX_TXSCHQ_FREE BIT_ULL(1)
#define NIX_TXSCHQ_CFG_DONE BIT_ULL(0)
#define TXSCH_MAP_FUNC(__pfvf_map) ((__pfvf_map) & 0xFFFF)
#define TXSCH_MAP_FLAGS(__pfvf_map) ((__pfvf_map) >> 16)
#define TXSCH_MAP(__func, __flags) (((__func) & 0xFFFF) | ((__flags) << 16))
#define TXSCH_SET_FLAG(__pfvf_map, flag) ((__pfvf_map) | ((flag) << 16))
u32 *pfvf_map;
};
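Each pfvf_map[] entry now packs the owning PF_FUNC in the low 16 bits and per-queue flags in the upper bits; the macros above encode and decode that layout. A small hedged illustration (pcifunc is assumed to be a valid u16 PF_FUNC):
	u32 map = TXSCH_MAP(pcifunc, 0);		/* owner set, no flags yet */
	map = TXSCH_SET_FLAG(map, NIX_TXSCHQ_CFG_DONE);	/* mark CSR config done */
	WARN_ON(TXSCH_MAP_FUNC(map) != pcifunc);
	WARN_ON(!(TXSCH_MAP_FLAGS(map) & NIX_TXSCHQ_CFG_DONE));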
......@@ -193,6 +226,21 @@ struct nix_hw {
struct nix_lso lso;
};
/* RVU block's capabilities or functionality,
* which vary by silicon version/skew.
*/
struct hw_cap {
/* Transmit side supported functionality */
u8 nix_tx_aggr_lvl; /* Tx link's traffic aggregation level */
u16 nix_txsch_per_cgx_lmac; /* Max Q's transmitting to CGX LMAC */
u16 nix_txsch_per_lbk_lmac; /* Max Q's transmitting to LBK LMAC */
u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */
bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
bool nix_shaping; /* Is shaping and coloring supported */
bool nix_tx_link_bp; /* Can link backpressure TL queues ? */
bool nix_rx_multicast; /* Rx packet replication support */
};
struct rvu_hwinfo {
u8 total_pfs; /* MAX RVU PFs HW supports */
u16 total_vfs; /* Max RVU VFs HW supports */
......@@ -204,7 +252,7 @@ struct rvu_hwinfo {
u8 sdp_links;
u8 npc_kpus; /* No of parser units */
struct hw_cap cap;
struct rvu_block block[BLK_COUNT]; /* Block info */
struct nix_hw *nix0;
struct npc_pkind pkind;
......@@ -261,8 +309,13 @@ struct rvu {
struct workqueue_struct *cgx_evh_wq;
spinlock_t cgx_evq_lock; /* cgx event queue lock */
struct list_head cgx_evq_head; /* cgx event queue head */
struct mutex cgx_cfg_lock; /* serialize cgx configuration */
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
......@@ -285,7 +338,8 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
return readq(rvu->pfreg_base + offset);
}
static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
/* Silicon revisions */
static inline bool is_rvu_96xx_A0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
......@@ -293,6 +347,14 @@ static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
}
static inline bool is_rvu_96xx_B0(struct rvu *rvu)
{
struct pci_dev *pdev = rvu->pdev;
return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) &&
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
}
/* Function Prototypes
* RVU
*/
......@@ -342,52 +404,25 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
*lmac_id = (map & 0xF);
}
#define M(_name, _id, fn_name, req, rsp) \
int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
MBOX_MESSAGES
#undef M
int rvu_cgx_init(struct rvu *rvu);
int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
struct cgx_stats_rsp *rsp);
int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp);
int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp);
int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
struct cgx_link_info_msg *rsp);
int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
int rxtxflag, u64 *stat);
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp);
int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
struct npa_lf_alloc_req *req,
struct npa_lf_alloc_rsp *rsp);
int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp);
/* NIX APIs */
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
......@@ -397,55 +432,7 @@ int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
struct nix_lf_alloc_req *req,
struct nix_lf_alloc_rsp *rsp);
int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp);
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
struct nix_txsch_alloc_req *req,
struct nix_txsch_alloc_rsp *rsp);
int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
struct nix_txsch_free_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
struct nix_vtag_config *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
struct nix_rss_flowkey_cfg *req,
struct nix_rss_flowkey_cfg_rsp *rsp);
int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
struct nix_mark_format_cfg *req,
struct nix_mark_format_cfg_rsp *rsp);
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
struct nix_lso_format_cfg *req,
struct nix_lso_format_cfg_rsp *rsp);
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
......@@ -460,45 +447,25 @@ void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc);
int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index);
int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
struct npc_mcam_alloc_entry_req *req,
struct npc_mcam_alloc_entry_rsp *rsp);
int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
struct npc_mcam_free_entry_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
struct npc_mcam_write_entry_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
struct npc_mcam_ena_dis_entry_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
struct npc_mcam_ena_dis_entry_req *req,
struct msg_rsp *rsp);
int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
struct npc_mcam_shift_entry_req *req,
struct npc_mcam_shift_entry_rsp *rsp);
int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
struct npc_mcam_alloc_counter_req *req,
struct npc_mcam_alloc_counter_rsp *rsp);
int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp);
int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
struct npc_mcam_oper_counter_req *req,
struct npc_mcam_oper_counter_rsp *rsp);
int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
struct npc_mcam_alloc_and_write_entry_req *req,
struct npc_mcam_alloc_and_write_entry_rsp *rsp);
int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
struct npc_get_kex_cfg_rsp *rsp);
void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
void rvu_dbg_exit(struct rvu *rvu);
#else
static inline void rvu_dbg_init(struct rvu *rvu) {}
static inline void rvu_dbg_exit(struct rvu *rvu) {}
#endif
#endif /* RVU_H */
......@@ -14,6 +14,7 @@
#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"
struct cgx_evq_entry {
struct list_head evq_node;
......@@ -40,12 +41,25 @@ MBOX_UP_CGX_MESSAGES
#undef M
/* Returns bitmap of mapped PFs */
static inline u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
unsigned long pfmap;
pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);
/* Assumes only one pf mapped to a cgx lmac port */
if (!pfmap)
return -ENODEV;
else
return find_first_bit(&pfmap, 16);
}
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}
......@@ -294,6 +308,8 @@ int rvu_cgx_init(struct rvu *rvu)
if (err)
return err;
mutex_init(&rvu->cgx_cfg_lock);
/* Ensure event handler registration is completed, before
* we turn on the links
*/
......@@ -334,6 +350,24 @@ int rvu_cgx_exit(struct rvu *rvu)
return 0;
}
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
u8 cgx_id, lmac_id;
void *cgxd;
if (!is_pf_cgxmapped(rvu, pf))
return;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
if (enable)
cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
else
cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
int pf = rvu_get_pf(pcifunc);
......@@ -562,3 +596,94 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
return 0;
}
/* Finds the cumulative value of NIX rx/tx counters from the LF of a PF and
* those from its VFs as well, i.e. NIX rx/tx counters at the CGX port level.
*/
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
int index, int rxtxflag, u64 *stat)
{
struct rvu_block *block;
int blkaddr;
u16 pcifunc;
int pf, lf;
if (!cgxd || !rvu)
return -EINVAL;
pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
if (pf < 0)
return pf;
/* Assumes the LF of a PF and all of its VFs belong to the same
* NIX block
*/
pcifunc = pf << RVU_PFVF_PF_SHIFT;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return 0;
block = &rvu->hw->block[blkaddr];
*stat = 0;
for (lf = 0; lf < block->lf.max; lf++) {
/* Check if a lf is attached to this PF or one of its VFs */
if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
~RVU_PFVF_FUNC_MASK)))
continue;
if (rxtxflag == NIX_STATS_RX)
*stat += rvu_read64(rvu, blkaddr,
NIX_AF_LFX_RX_STATX(lf, index));
else
*stat += rvu_read64(rvu, blkaddr,
NIX_AF_LFX_TX_STATX(lf, index));
}
return 0;
}
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
struct rvu_pfvf *parent_pf, *pfvf;
int cgx_users, err = 0;
if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
return 0;
parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
pfvf = rvu_get_pfvf(rvu, pcifunc);
mutex_lock(&rvu->cgx_cfg_lock);
if (start && pfvf->cgx_in_use)
goto exit; /* CGX is already started hence nothing to do */
if (!start && !pfvf->cgx_in_use)
goto exit; /* CGX is already stopped hence nothing to do */
if (start) {
cgx_users = parent_pf->cgx_users;
parent_pf->cgx_users++;
} else {
parent_pf->cgx_users--;
cgx_users = parent_pf->cgx_users;
}
/* Start CGX when first of all NIXLFs is started.
* Stop CGX when last of all NIXLFs is stopped.
*/
if (!cgx_users) {
err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
start);
if (err) {
dev_err(rvu->dev, "Unable to %s CGX\n",
start ? "start" : "stop");
/* Revert the usage count in case of error */
parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
: parent_pf->cgx_users + 1;
goto exit;
}
}
pfvf->cgx_in_use = start;
exit:
mutex_unlock(&rvu->cgx_cfg_lock);
return err;
}
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2019 Marvell International Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifdef CONFIG_DEBUG_FS
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "rvu_struct.h"
#include "rvu_reg.h"
#include "rvu.h"
#include "cgx.h"
#include "npc.h"
#define DEBUGFS_DIR_NAME "octeontx2"
enum {
CGX_STAT0,
CGX_STAT1,
CGX_STAT2,
CGX_STAT3,
CGX_STAT4,
CGX_STAT5,
CGX_STAT6,
CGX_STAT7,
CGX_STAT8,
CGX_STAT9,
CGX_STAT10,
CGX_STAT11,
CGX_STAT12,
CGX_STAT13,
CGX_STAT14,
CGX_STAT15,
CGX_STAT16,
CGX_STAT17,
CGX_STAT18,
};
/* NIX TX stats */
enum nix_stat_lf_tx {
TX_UCAST = 0x0,
TX_BCAST = 0x1,
TX_MCAST = 0x2,
TX_DROP = 0x3,
TX_OCTS = 0x4,
TX_STATS_ENUM_LAST,
};
/* NIX RX stats */
enum nix_stat_lf_rx {
RX_OCTS = 0x0,
RX_UCAST = 0x1,
RX_BCAST = 0x2,
RX_MCAST = 0x3,
RX_DROP = 0x4,
RX_DROP_OCTS = 0x5,
RX_FCS = 0x6,
RX_ERR = 0x7,
RX_DRP_BCAST = 0x8,
RX_DRP_MCAST = 0x9,
RX_DRP_L3BCAST = 0xa,
RX_DRP_L3MCAST = 0xb,
RX_STATS_ENUM_LAST,
};
static char *cgx_rx_stats_fields[] = {
[CGX_STAT0] = "Received packets",
[CGX_STAT1] = "Octets of received packets",
[CGX_STAT2] = "Received PAUSE packets",
[CGX_STAT3] = "Received PAUSE and control packets",
[CGX_STAT4] = "Filtered DMAC0 (NIX-bound) packets",
[CGX_STAT5] = "Filtered DMAC0 (NIX-bound) octets",
[CGX_STAT6] = "Packets dropped due to RX FIFO full",
[CGX_STAT7] = "Octets dropped due to RX FIFO full",
[CGX_STAT8] = "Error packets",
[CGX_STAT9] = "Filtered DMAC1 (NCSI-bound) packets",
[CGX_STAT10] = "Filtered DMAC1 (NCSI-bound) octets",
[CGX_STAT11] = "NCSI-bound packets dropped",
[CGX_STAT12] = "NCSI-bound octets dropped",
};
static char *cgx_tx_stats_fields[] = {
[CGX_STAT0] = "Packets dropped due to excessive collisions",
[CGX_STAT1] = "Packets dropped due to excessive deferral",
[CGX_STAT2] = "Multiple collisions before successful transmission",
[CGX_STAT3] = "Single collisions before successful transmission",
[CGX_STAT4] = "Total octets sent on the interface",
[CGX_STAT5] = "Total frames sent on the interface",
[CGX_STAT6] = "Packets sent with an octet count < 64",
[CGX_STAT7] = "Packets sent with an octet count == 64",
[CGX_STAT8] = "Packets sent with an octet count of 65–127",
[CGX_STAT9] = "Packets sent with an octet count of 128-255",
[CGX_STAT10] = "Packets sent with an octet count of 256-511",
[CGX_STAT11] = "Packets sent with an octet count of 512-1023",
[CGX_STAT12] = "Packets sent with an octet count of 1024-1518",
[CGX_STAT13] = "Packets sent with an octet count of > 1518",
[CGX_STAT14] = "Packets sent to a broadcast DMAC",
[CGX_STAT15] = "Packets sent to the multicast DMAC",
[CGX_STAT16] = "Transmit underflow and were truncated",
[CGX_STAT17] = "Control/PAUSE packets sent",
};
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
blk_addr, NDC_AF_CONST) & 0xFF)
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL
#define RVU_DEBUG_SEQ_FOPS(name, read_op, write_op) \
static int rvu_dbg_open_##name(struct inode *inode, struct file *file) \
{ \
return single_open(file, rvu_dbg_##read_op, inode->i_private); \
} \
static const struct file_operations rvu_dbg_##name##_fops = { \
.owner = THIS_MODULE, \
.open = rvu_dbg_open_##name, \
.read = seq_read, \
.write = rvu_dbg_##write_op, \
.llseek = seq_lseek, \
.release = single_release, \
}
#define RVU_DEBUG_FOPS(name, read_op, write_op) \
static const struct file_operations rvu_dbg_##name##_fops = { \
.owner = THIS_MODULE, \
.open = simple_open, \
.read = rvu_dbg_##read_op, \
.write = rvu_dbg_##write_op \
}
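These two macros stamp out either seq_file-backed or raw read/write file_operations for each debugfs node; rvu_dbg_init() (further down in this file, not shown in these hunks) registers them under the "octeontx2" root. A minimal hedged sketch of that kind of wiring, not the actual implementation:
/* Sketch only: create the root dir and hook up one generated fops */
static void example_dbg_wiring(struct rvu *rvu)
{
	rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
	if (!rvu->rvu_dbg.root)
		return;
	rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
	/* 'qsize' is backed by the fops generated via RVU_DEBUG_SEQ_FOPS() */
	debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
			    &rvu_dbg_npa_qsize_fops);
}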
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
/* Dumps current provisioning status of all RVU block LFs */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
char __user *buffer,
size_t count, loff_t *ppos)
{
int index, off = 0, flag = 0, go_back = 0, off_prev;
struct rvu *rvu = filp->private_data;
int lf, pf, vf, pcifunc;
struct rvu_block block;
int bytes_not_copied;
int buf_size = 2048;
char *buf;
/* don't allow partial reads */
if (*ppos != 0)
return 0;
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
off += scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
for (index = 0; index < BLK_COUNT; index++)
if (strlen(rvu->hw->block[index].name))
off += scnprintf(&buf[off], buf_size - 1 - off,
"%*s\t", (index - 1) * 2,
rvu->hw->block[index].name);
off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
pcifunc = pf << 10 | vf;
if (!pcifunc)
continue;
if (vf) {
go_back = scnprintf(&buf[off],
buf_size - 1 - off,
"PF%d:VF%d\t\t", pf,
vf - 1);
} else {
go_back = scnprintf(&buf[off],
buf_size - 1 - off,
"PF%d\t\t", pf);
}
off += go_back;
for (index = 0; index < BLKTYPE_MAX; index++) {
block = rvu->hw->block[index];
if (!strlen(block.name))
continue;
off_prev = off;
for (lf = 0; lf < block.lf.max; lf++) {
if (block.fn_map[lf] != pcifunc)
continue;
flag = 1;
off += scnprintf(&buf[off], buf_size - 1
- off, "%3d,", lf);
}
if (flag && off_prev != off)
off--;
else
go_back++;
off += scnprintf(&buf[off], buf_size - 1 - off,
"\t");
}
if (!flag)
off -= go_back;
else
flag = 0;
off--;
off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
}
}
bytes_not_copied = copy_to_user(buffer, buf, off);
kfree(buf);
if (bytes_not_copied)
return -EFAULT;
*ppos = off;
return off;
}
RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf,
u16 *pcifunc)
{
struct rvu_block *block;
struct rvu_hwinfo *hw;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
if (blkaddr < 0) {
dev_warn(rvu->dev, "Invalid blktype\n");
return false;
}
hw = rvu->hw;
block = &hw->block[blkaddr];
if (lf < 0 || lf >= block->lf.max) {
dev_warn(rvu->dev, "Invalid LF: valid range: 0-%d\n",
block->lf.max - 1);
return false;
}
*pcifunc = block->fn_map[lf];
if (!*pcifunc) {
dev_warn(rvu->dev,
"This LF is not attached to any RVU PFFUNC\n");
return false;
}
return true;
}
static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf)
{
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return;
if (!pfvf->aura_ctx) {
seq_puts(m, "Aura context is not initialized\n");
} else {
bitmap_print_to_pagebuf(false, buf, pfvf->aura_bmap,
pfvf->aura_ctx->qsize);
seq_printf(m, "Aura count : %d\n", pfvf->aura_ctx->qsize);
seq_printf(m, "Aura context ena/dis bitmap : %s\n", buf);
}
if (!pfvf->pool_ctx) {
seq_puts(m, "Pool context is not initialized\n");
} else {
bitmap_print_to_pagebuf(false, buf, pfvf->pool_bmap,
pfvf->pool_ctx->qsize);
seq_printf(m, "Pool count : %d\n", pfvf->pool_ctx->qsize);
seq_printf(m, "Pool context ena/dis bitmap : %s\n", buf);
}
kfree(buf);
}
/* The 'qsize' entry dumps current Aura/Pool context Qsize
* and each context's current enable/disable status in a bitmap.
*/
static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
int blktype)
{
void (*print_qsize)(struct seq_file *filp,
struct rvu_pfvf *pfvf) = NULL;
struct rvu_pfvf *pfvf;
struct rvu *rvu;
int qsize_id;
u16 pcifunc;
rvu = filp->private;
switch (blktype) {
case BLKTYPE_NPA:
qsize_id = rvu->rvu_dbg.npa_qsize_id;
print_qsize = print_npa_qsize;
break;
case BLKTYPE_NIX:
qsize_id = rvu->rvu_dbg.nix_qsize_id;
print_qsize = print_nix_qsize;
break;
default:
return -EINVAL;
}
if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
print_qsize(filp, pfvf);
return 0;
}
static ssize_t rvu_dbg_qsize_write(struct file *filp,
const char __user *buffer, size_t count,
loff_t *ppos, int blktype)
{
char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix";
struct seq_file *seqfile = filp->private_data;
char *cmd_buf, *cmd_buf_tmp, *subtoken;
struct rvu *rvu = seqfile->private;
u16 pcifunc;
int ret, lf;
cmd_buf = memdup_user_nul(buffer, count); /* NUL-terminated copy, no write past the user-sized buffer */
if (IS_ERR(cmd_buf))
return -ENOMEM;
cmd_buf_tmp = strchr(cmd_buf, '\n');
if (cmd_buf_tmp) {
*cmd_buf_tmp = '\0';
count = cmd_buf_tmp - cmd_buf + 1;
}
cmd_buf_tmp = cmd_buf;
subtoken = strsep(&cmd_buf, " ");
ret = subtoken ? kstrtoint(subtoken, 10, &lf) : -EINVAL;
if (cmd_buf)
ret = -EINVAL;
if (!strncmp(subtoken, "help", 4) || ret < 0) {
dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
goto qsize_write_done;
}
if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) {
ret = -EINVAL;
goto qsize_write_done;
}
if (blktype == BLKTYPE_NPA)
rvu->rvu_dbg.npa_qsize_id = lf;
else
rvu->rvu_dbg.nix_qsize_id = lf;
qsize_write_done:
kfree(cmd_buf_tmp);
return ret ? ret : count;
}
static ssize_t rvu_dbg_npa_qsize_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *ppos)
{
return rvu_dbg_qsize_write(filp, buffer, count, ppos,
BLKTYPE_NPA);
}
static int rvu_dbg_npa_qsize_display(struct seq_file *filp, void *unused)
{
return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NPA);
}
RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
/* Dumps given NPA Aura's context */
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
struct npa_aura_s *aura = &rsp->aura;
seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
seq_printf(m, "W1: ena\t\t\t%d\nW1: pool caching\t%d\n",
aura->ena, aura->pool_caching);
seq_printf(m, "W1: pool way mask\t%d\nW1: avg con\t\t%d\n",
aura->pool_way_mask, aura->avg_con);
seq_printf(m, "W1: pool drop ena\t%d\nW1: aura drop ena\t%d\n",
aura->pool_drop_ena, aura->aura_drop_ena);
seq_printf(m, "W1: bp_ena\t\t%d\nW1: aura drop\t\t%d\n",
aura->bp_ena, aura->aura_drop);
seq_printf(m, "W1: aura shift\t\t%d\nW1: avg_level\t\t%d\n",
aura->shift, aura->avg_level);
seq_printf(m, "W2: count\t\t%llu\nW2: nix0_bpid\t\t%d\nW2: nix1_bpid\t\t%d\n",
(u64)aura->count, aura->nix0_bpid, aura->nix1_bpid);
seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
(u64)aura->limit, aura->bp, aura->fc_ena);
seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
aura->fc_up_crossing, aura->fc_stype);
seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
seq_printf(m, "W4: fc_addr\t\t%llx\n", aura->fc_addr);
seq_printf(m, "W5: pool_drop\t\t%d\nW5: update_time\t\t%d\n",
aura->pool_drop, aura->update_time);
seq_printf(m, "W5: err_int \t\t%d\nW5: err_int_ena\t\t%d\n",
aura->err_int, aura->err_int_ena);
seq_printf(m, "W5: thresh_int\t\t%d\nW5: thresh_int_ena \t%d\n",
aura->thresh_int, aura->thresh_int_ena);
seq_printf(m, "W5: thresh_up\t\t%d\nW5: thresh_qint_idx\t%d\n",
aura->thresh_up, aura->thresh_qint_idx);
seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
}
/* Dumps given NPA Pool's context */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
struct npa_pool_s *pool = &rsp->pool;
seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
seq_printf(m, "W1: ena \t\t%d\nW1: nat_align \t\t%d\n",
pool->ena, pool->nat_align);
seq_printf(m, "W1: stack_caching\t%d\nW1: stack_way_mask\t%d\n",
pool->stack_caching, pool->stack_way_mask);
seq_printf(m, "W1: buf_offset\t\t%d\nW1: buf_size\t\t%d\n",
pool->buf_offset, pool->buf_size);
seq_printf(m, "W2: stack_max_pages \t%d\nW2: stack_pages\t\t%d\n",
pool->stack_max_pages, pool->stack_pages);
seq_printf(m, "W3: op_pc \t\t%llu\n", (u64)pool->op_pc);
seq_printf(m, "W4: stack_offset\t%d\nW4: shift\t\t%d\nW4: avg_level\t\t%d\n",
pool->stack_offset, pool->shift, pool->avg_level);
seq_printf(m, "W4: avg_con \t\t%d\nW4: fc_ena\t\t%d\nW4: fc_stype\t\t%d\n",
pool->avg_con, pool->fc_ena, pool->fc_stype);
seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
pool->fc_hyst_bits, pool->fc_up_crossing);
seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
seq_printf(m, "W6: ptr_start\t\t%llx\n", pool->ptr_start);
seq_printf(m, "W7: ptr_end\t\t%llx\n", pool->ptr_end);
seq_printf(m, "W8: err_int\t\t%d\nW8: err_int_ena\t\t%d\n",
pool->err_int, pool->err_int_ena);
seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
pool->thresh_int_ena, pool->thresh_up);
seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n",
pool->thresh_qint_idx, pool->err_qint_idx);
}
/* Reads aura/pool's ctx from admin queue */
static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
{
void (*print_npa_ctx)(struct seq_file *m, struct npa_aq_enq_rsp *rsp);
struct npa_aq_enq_req aq_req;
struct npa_aq_enq_rsp rsp;
struct rvu_pfvf *pfvf;
int aura, rc, max_id;
int npalf, id, all;
struct rvu *rvu;
u16 pcifunc;
rvu = m->private;
switch (ctype) {
case NPA_AQ_CTYPE_AURA:
npalf = rvu->rvu_dbg.npa_aura_ctx.lf;
id = rvu->rvu_dbg.npa_aura_ctx.id;
all = rvu->rvu_dbg.npa_aura_ctx.all;
break;
case NPA_AQ_CTYPE_POOL:
npalf = rvu->rvu_dbg.npa_pool_ctx.lf;
id = rvu->rvu_dbg.npa_pool_ctx.id;
all = rvu->rvu_dbg.npa_pool_ctx.all;
break;
default:
return -EINVAL;
}
if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (ctype == NPA_AQ_CTYPE_AURA && !pfvf->aura_ctx) {
seq_puts(m, "Aura context is not initialized\n");
return -EINVAL;
} else if (ctype == NPA_AQ_CTYPE_POOL && !pfvf->pool_ctx) {
seq_puts(m, "Pool context is not initialized\n");
return -EINVAL;
}
memset(&aq_req, 0, sizeof(struct npa_aq_enq_req));
aq_req.hdr.pcifunc = pcifunc;
aq_req.ctype = ctype;
aq_req.op = NPA_AQ_INSTOP_READ;
if (ctype == NPA_AQ_CTYPE_AURA) {
max_id = pfvf->aura_ctx->qsize;
print_npa_ctx = print_npa_aura_ctx;
} else {
max_id = pfvf->pool_ctx->qsize;
print_npa_ctx = print_npa_pool_ctx;
}
if (id < 0 || id >= max_id) {
seq_printf(m, "Invalid %s, valid range is 0-%d\n",
(ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
max_id - 1);
return -EINVAL;
}
if (all)
id = 0;
else
max_id = id + 1;
for (aura = id; aura < max_id; aura++) {
aq_req.aura_id = aura;
seq_printf(m, "======%s : %d=======\n",
(ctype == NPA_AQ_CTYPE_AURA) ? "AURA" : "POOL",
aq_req.aura_id);
rc = rvu_npa_aq_enq_inst(rvu, &aq_req, &rsp);
if (rc) {
seq_puts(m, "Failed to read context\n");
return -EINVAL;
}
print_npa_ctx(m, &rsp);
}
return 0;
}
static int write_npa_ctx(struct rvu *rvu, bool all,
int npalf, int id, int ctype)
{
struct rvu_pfvf *pfvf;
int max_id = 0;
u16 pcifunc;
if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (ctype == NPA_AQ_CTYPE_AURA) {
if (!pfvf->aura_ctx) {
dev_warn(rvu->dev, "Aura context is not initialized\n");
return -EINVAL;
}
max_id = pfvf->aura_ctx->qsize;
} else if (ctype == NPA_AQ_CTYPE_POOL) {
if (!pfvf->pool_ctx) {
dev_warn(rvu->dev, "Pool context is not initialized\n");
return -EINVAL;
}
max_id = pfvf->pool_ctx->qsize;
}
if (id < 0 || id >= max_id) {
dev_warn(rvu->dev, "Invalid %s, valid range is 0-%d\n",
(ctype == NPA_AQ_CTYPE_AURA) ? "aura" : "pool",
max_id - 1);
return -EINVAL;
}
switch (ctype) {
case NPA_AQ_CTYPE_AURA:
rvu->rvu_dbg.npa_aura_ctx.lf = npalf;
rvu->rvu_dbg.npa_aura_ctx.id = id;
rvu->rvu_dbg.npa_aura_ctx.all = all;
break;
case NPA_AQ_CTYPE_POOL:
rvu->rvu_dbg.npa_pool_ctx.lf = npalf;
rvu->rvu_dbg.npa_pool_ctx.id = id;
rvu->rvu_dbg.npa_pool_ctx.all = all;
break;
default:
return -EINVAL;
}
return 0;
}
static int parse_cmd_buffer_ctx(char *cmd_buf, size_t *count,
const char __user *buffer, int *npalf,
int *id, bool *all)
{
int bytes_not_copied;
char *cmd_buf_tmp;
char *subtoken;
int ret;
bytes_not_copied = copy_from_user(cmd_buf, buffer, *count);
if (bytes_not_copied)
return -EFAULT;
cmd_buf[*count] = '\0';
cmd_buf_tmp = strchr(cmd_buf, '\n');
if (cmd_buf_tmp) {
*cmd_buf_tmp = '\0';
*count = cmd_buf_tmp - cmd_buf + 1;
}
subtoken = strsep(&cmd_buf, " ");
ret = subtoken ? kstrtoint(subtoken, 10, npalf) : -EINVAL;
if (ret < 0)
return ret;
subtoken = strsep(&cmd_buf, " ");
if (subtoken && strcmp(subtoken, "all") == 0) {
*all = true;
} else {
ret = subtoken ? kstrtoint(subtoken, 10, id) : -EINVAL;
if (ret < 0)
return ret;
}
if (cmd_buf)
return -EINVAL;
return ret;
}
static ssize_t rvu_dbg_npa_ctx_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *ppos, int ctype)
{
char *cmd_buf, *ctype_string = (ctype == NPA_AQ_CTYPE_AURA) ?
"aura" : "pool";
struct seq_file *seqfp = filp->private_data;
struct rvu *rvu = seqfp->private;
int npalf, id = 0, ret;
bool all = false;
if ((*ppos != 0) || !count)
return -EINVAL;
cmd_buf = kzalloc(count + 1, GFP_KERNEL);
if (!cmd_buf)
return count;
ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
&npalf, &id, &all);
if (ret < 0) {
dev_info(rvu->dev,
"Usage: echo <npalf> [%s number/all] > %s_ctx\n",
ctype_string, ctype_string);
goto done;
} else {
ret = write_npa_ctx(rvu, all, npalf, id, ctype);
}
done:
kfree(cmd_buf);
return ret ? ret : count;
}
static ssize_t rvu_dbg_npa_aura_ctx_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *ppos)
{
return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
NPA_AQ_CTYPE_AURA);
}
static int rvu_dbg_npa_aura_ctx_display(struct seq_file *filp, void *unused)
{
return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_AURA);
}
RVU_DEBUG_SEQ_FOPS(npa_aura_ctx, npa_aura_ctx_display, npa_aura_ctx_write);
static ssize_t rvu_dbg_npa_pool_ctx_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *ppos)
{
return rvu_dbg_npa_ctx_write(filp, buffer, count, ppos,
NPA_AQ_CTYPE_POOL);
}
static int rvu_dbg_npa_pool_ctx_display(struct seq_file *filp, void *unused)
{
return rvu_dbg_npa_ctx_display(filp, unused, NPA_AQ_CTYPE_POOL);
}
RVU_DEBUG_SEQ_FOPS(npa_pool_ctx, npa_pool_ctx_display, npa_pool_ctx_write);
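/* Dump per-port NDC request, latency and outstanding-request counters
 * for the given cache mode (caching/bypass) and transaction type
 * (read/write).
 */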
static void ndc_cache_stats(struct seq_file *s, int blk_addr,
int ctype, int transaction)
{
u64 req, out_req, lat, cant_alloc;
struct rvu *rvu = s->private;
int port;
for (port = 0; port < NDC_MAX_PORT; port++) {
req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
(port, ctype, transaction));
lat = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_LAT_PC
(port, ctype, transaction));
out_req = rvu_read64(rvu, blk_addr,
NDC_AF_PORTX_RTX_RWX_OSTDN_PC
(port, ctype, transaction));
cant_alloc = rvu_read64(rvu, blk_addr,
NDC_AF_PORTX_RTX_CANT_ALLOC_PC
(port, transaction));
seq_printf(s, "\nPort:%d\n", port);
seq_printf(s, "\tTotal Requests:\t\t%lld\n", req);
seq_printf(s, "\tTotal Time Taken:\t%lld cycles\n", lat);
seq_printf(s, "\tAvg Latency:\t\t%lld cycles\n", lat / req);
seq_printf(s, "\tOutstanding Requests:\t%lld\n", out_req);
seq_printf(s, "\tCant Alloc Requests:\t%lld\n", cant_alloc);
}
}
static int ndc_blk_cache_stats(struct seq_file *s, int idx, int blk_addr)
{
seq_puts(s, "\n***** CACHE mode read stats *****\n");
ndc_cache_stats(s, blk_addr, CACHING, NDC_READ_TRANS);
seq_puts(s, "\n***** CACHE mode write stats *****\n");
ndc_cache_stats(s, blk_addr, CACHING, NDC_WRITE_TRANS);
seq_puts(s, "\n***** BY-PASS mode read stats *****\n");
ndc_cache_stats(s, blk_addr, BYPASS, NDC_READ_TRANS);
seq_puts(s, "\n***** BY-PASS mode write stats *****\n");
ndc_cache_stats(s, blk_addr, BYPASS, NDC_WRITE_TRANS);
return 0;
}
static int rvu_dbg_npa_ndc_cache_display(struct seq_file *filp, void *unused)
{
return ndc_blk_cache_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}
RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
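/* Dump per-bank NDC hit/miss counters */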
static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
{
struct rvu *rvu = s->private;
int bank, max_bank;
max_bank = NDC_MAX_BANK(rvu, blk_addr);
for (bank = 0; bank < max_bank; bank++) {
seq_printf(s, "BANK:%d\n", bank);
seq_printf(s, "\tHits:\t%lld\n",
(u64)rvu_read64(rvu, blk_addr,
NDC_AF_BANKX_HIT_PC(bank)));
seq_printf(s, "\tMiss:\t%lld\n",
(u64)rvu_read64(rvu, blk_addr,
NDC_AF_BANKX_MISS_PC(bank)));
}
return 0;
}
static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
{
return ndc_blk_cache_stats(filp, NIX0_RX,
BLKADDR_NDC_NIX0_RX);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
{
return ndc_blk_cache_stats(filp, NIX0_TX,
BLKADDR_NDC_NIX0_TX);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
static int rvu_dbg_npa_ndc_hits_miss_display(struct seq_file *filp,
void *unused)
{
return ndc_blk_hits_miss_stats(filp, NPA0_U, BLKADDR_NDC_NPA0);
}
RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
void *unused)
{
return ndc_blk_hits_miss_stats(filp,
NPA0_U, BLKADDR_NDC_NIX0_RX);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
void *unused)
{
return ndc_blk_hits_miss_stats(filp,
NPA0_U, BLKADDR_NDC_NIX0_TX);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
/* Dumps given nix_sq's context */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
sq_ctx->sqe_way_mask, sq_ctx->cq);
seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
sq_ctx->sdp_mcast, sq_ctx->substream);
seq_printf(m, "W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n\n",
sq_ctx->qint_idx, sq_ctx->ena);
seq_printf(m, "W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d\n",
sq_ctx->sqb_count, sq_ctx->default_chan);
seq_printf(m, "W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d\n",
sq_ctx->smq_rr_quantum, sq_ctx->sso_ena);
seq_printf(m, "W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n\n",
sq_ctx->xoff, sq_ctx->cq_ena, sq_ctx->smq);
seq_printf(m, "W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d\n",
sq_ctx->sqe_stype, sq_ctx->sq_int_ena);
seq_printf(m, "W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d\n",
sq_ctx->sq_int, sq_ctx->sqb_aura);
seq_printf(m, "W2: smq_rr_count \t\t%d\n\n", sq_ctx->smq_rr_count);
seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
seq_printf(m, "W3: smenq_next_sqb_vld \t\t%d\nW3: head_offset\t\t\t%d\n",
sq_ctx->smenq_next_sqb_vld, sq_ctx->head_offset);
seq_printf(m, "W3: smenq_offset\t\t%d\nW3: tail_offset\t\t\t%d\n",
sq_ctx->smenq_offset, sq_ctx->tail_offset);
seq_printf(m, "W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq\t\t\t%d\n",
sq_ctx->smq_lso_segnum, sq_ctx->smq_next_sq);
seq_printf(m, "W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d\n",
sq_ctx->mnq_dis, sq_ctx->lmt_dis);
seq_printf(m, "W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n\n",
sq_ctx->cq_limit, sq_ctx->max_sqe_size);
seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
sq_ctx->smenq_next_sqb);
seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
seq_printf(m, "W9: vfi_lso_vld\t\t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d\n",
sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
seq_printf(m, "W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d\n",
sq_ctx->vfi_lso_vlan0_ins_ena, sq_ctx->vfi_lso_mps);
seq_printf(m, "W9: vfi_lso_sb\t\t\t%d\nW9: vfi_lso_sizem1\t\t%d\n",
sq_ctx->vfi_lso_sb, sq_ctx->vfi_lso_sizem1);
seq_printf(m, "W9: vfi_lso_total\t\t%d\n\n", sq_ctx->vfi_lso_total);
seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
(u64)sq_ctx->scm_lso_rem);
seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
(u64)sq_ctx->dropped_octs);
seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
(u64)sq_ctx->dropped_pkts);
}
/* Dumps given nix_rq's context */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
rq_ctx->wqe_aura, rq_ctx->substream);
seq_printf(m, "W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
rq_ctx->cq, rq_ctx->ena_wqwd);
seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
rq_ctx->ipsech_ena, rq_ctx->sso_ena);
seq_printf(m, "W0: ena \t\t\t%d\n\n", rq_ctx->ena);
seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
rq_ctx->lpb_drop_ena, rq_ctx->spb_drop_ena);
seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d\n",
rq_ctx->xqe_drop_ena, rq_ctx->wqe_caching);
seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
rq_ctx->pb_caching, rq_ctx->sso_tt);
seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
rq_ctx->sso_grp, rq_ctx->lpb_aura);
seq_printf(m, "W1: spb_aura \t\t\t%d\n\n", rq_ctx->spb_aura);
seq_printf(m, "W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d\n",
rq_ctx->xqe_hdr_split, rq_ctx->xqe_imm_copy);
seq_printf(m, "W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d\n",
rq_ctx->xqe_imm_size, rq_ctx->later_skip);
seq_printf(m, "W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d\n",
rq_ctx->first_skip, rq_ctx->lpb_sizem1);
seq_printf(m, "W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d\n",
rq_ctx->spb_ena, rq_ctx->wqe_skip);
seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n\n", rq_ctx->spb_sizem1);
seq_printf(m, "W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d\n",
rq_ctx->spb_pool_pass, rq_ctx->spb_pool_drop);
seq_printf(m, "W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d\n",
rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
seq_printf(m, "W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d\n",
rq_ctx->wqe_pool_pass, rq_ctx->wqe_pool_drop);
seq_printf(m, "W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n\n",
rq_ctx->xqe_pass, rq_ctx->xqe_drop);
seq_printf(m, "W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d\n",
rq_ctx->qint_idx, rq_ctx->rq_int_ena);
seq_printf(m, "W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d\n",
rq_ctx->rq_int, rq_ctx->lpb_pool_pass);
seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d\n",
rq_ctx->lpb_pool_drop, rq_ctx->lpb_aura_pass);
seq_printf(m, "W4: lpb_aura_drop \t\t%d\n\n", rq_ctx->lpb_aura_drop);
seq_printf(m, "W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d\n",
rq_ctx->flow_tagw, rq_ctx->bad_utag);
seq_printf(m, "W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n\n",
rq_ctx->good_utag, rq_ctx->ltag);
seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
}
/* Dumps given nix_cq's context */
static void print_nix_cq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_cq_ctx_s *cq_ctx = &rsp->cq;
seq_printf(m, "W0: base \t\t\t%llx\n\n", cq_ctx->base);
seq_printf(m, "W1: wrptr \t\t\t%llx\n", (u64)cq_ctx->wrptr);
seq_printf(m, "W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d\n",
cq_ctx->avg_con, cq_ctx->cint_idx);
seq_printf(m, "W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d\n",
cq_ctx->cq_err, cq_ctx->qint_idx);
seq_printf(m, "W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n\n",
cq_ctx->bpid, cq_ctx->bp_ena);
seq_printf(m, "W2: update_time \t\t%d\nW2:avg_level \t\t\t%d\n",
cq_ctx->update_time, cq_ctx->avg_level);
seq_printf(m, "W2: head \t\t\t%d\nW2:tail \t\t\t%d\n\n",
cq_ctx->head, cq_ctx->tail);
seq_printf(m, "W3: cq_err_int_ena \t\t%d\nW3:cq_err_int \t\t\t%d\n",
cq_ctx->cq_err_int_ena, cq_ctx->cq_err_int);
seq_printf(m, "W3: qsize \t\t\t%d\nW3:caching \t\t\t%d\n",
cq_ctx->qsize, cq_ctx->caching);
seq_printf(m, "W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d\n",
cq_ctx->substream, cq_ctx->ena);
seq_printf(m, "W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d\n",
cq_ctx->drop_ena, cq_ctx->drop);
seq_printf(m, "W3: bp \t\t\t\t%d\n\n", cq_ctx->bp);
}
static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
void *unused, int ctype)
{
void (*print_nix_ctx)(struct seq_file *filp,
struct nix_aq_enq_rsp *rsp) = NULL;
struct rvu *rvu = filp->private;
struct nix_aq_enq_req aq_req;
struct nix_aq_enq_rsp rsp;
char *ctype_string = NULL;
int qidx, rc, max_id = 0;
struct rvu_pfvf *pfvf;
int nixlf, id, all;
u16 pcifunc;
switch (ctype) {
case NIX_AQ_CTYPE_CQ:
nixlf = rvu->rvu_dbg.nix_cq_ctx.lf;
id = rvu->rvu_dbg.nix_cq_ctx.id;
all = rvu->rvu_dbg.nix_cq_ctx.all;
break;
case NIX_AQ_CTYPE_SQ:
nixlf = rvu->rvu_dbg.nix_sq_ctx.lf;
id = rvu->rvu_dbg.nix_sq_ctx.id;
all = rvu->rvu_dbg.nix_sq_ctx.all;
break;
case NIX_AQ_CTYPE_RQ:
nixlf = rvu->rvu_dbg.nix_rq_ctx.lf;
id = rvu->rvu_dbg.nix_rq_ctx.id;
all = rvu->rvu_dbg.nix_rq_ctx.all;
break;
default:
return -EINVAL;
}
if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (ctype == NIX_AQ_CTYPE_SQ && !pfvf->sq_ctx) {
seq_puts(filp, "SQ context is not initialized\n");
return -EINVAL;
} else if (ctype == NIX_AQ_CTYPE_RQ && !pfvf->rq_ctx) {
seq_puts(filp, "RQ context is not initialized\n");
return -EINVAL;
} else if (ctype == NIX_AQ_CTYPE_CQ && !pfvf->cq_ctx) {
seq_puts(filp, "CQ context is not initialized\n");
return -EINVAL;
}
if (ctype == NIX_AQ_CTYPE_SQ) {
max_id = pfvf->sq_ctx->qsize;
ctype_string = "sq";
print_nix_ctx = print_nix_sq_ctx;
} else if (ctype == NIX_AQ_CTYPE_RQ) {
max_id = pfvf->rq_ctx->qsize;
ctype_string = "rq";
print_nix_ctx = print_nix_rq_ctx;
} else if (ctype == NIX_AQ_CTYPE_CQ) {
max_id = pfvf->cq_ctx->qsize;
ctype_string = "cq";
print_nix_ctx = print_nix_cq_ctx;
}
memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
aq_req.hdr.pcifunc = pcifunc;
aq_req.ctype = ctype;
aq_req.op = NIX_AQ_INSTOP_READ;
if (all)
id = 0;
else
max_id = id + 1;
for (qidx = id; qidx < max_id; qidx++) {
aq_req.qidx = qidx;
seq_printf(filp, "=====%s_ctx for nixlf:%d and qidx:%d is=====\n",
ctype_string, nixlf, aq_req.qidx);
rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
if (rc) {
seq_puts(filp, "Failed to read the context\n");
return -EINVAL;
}
print_nix_ctx(filp, &rsp);
}
return 0;
}
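/* Validate the requested NIX LF and queue id, then cache them in
 * rvu_dbg so that the next read of sq_ctx/rq_ctx/cq_ctx dumps the
 * chosen queue context(s).
 */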
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
int id, int ctype, char *ctype_string)
{
struct rvu_pfvf *pfvf;
int max_id = 0;
u16 pcifunc;
if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (ctype == NIX_AQ_CTYPE_SQ) {
if (!pfvf->sq_ctx) {
dev_warn(rvu->dev, "SQ context is not initialized\n");
return -EINVAL;
}
max_id = pfvf->sq_ctx->qsize;
} else if (ctype == NIX_AQ_CTYPE_RQ) {
if (!pfvf->rq_ctx) {
dev_warn(rvu->dev, "RQ context is not initialized\n");
return -EINVAL;
}
max_id = pfvf->rq_ctx->qsize;
} else if (ctype == NIX_AQ_CTYPE_CQ) {
if (!pfvf->cq_ctx) {
dev_warn(rvu->dev, "CQ context is not initialized\n");
return -EINVAL;
}
max_id = pfvf->cq_ctx->qsize;
}
if (id < 0 || id >= max_id) {
dev_warn(rvu->dev, "Invalid %s_ctx valid range 0-%d\n",
ctype_string, max_id - 1);
return -EINVAL;
}
switch (ctype) {
case NIX_AQ_CTYPE_CQ:
rvu->rvu_dbg.nix_cq_ctx.lf = nixlf;
rvu->rvu_dbg.nix_cq_ctx.id = id;
rvu->rvu_dbg.nix_cq_ctx.all = all;
break;
case NIX_AQ_CTYPE_SQ:
rvu->rvu_dbg.nix_sq_ctx.lf = nixlf;
rvu->rvu_dbg.nix_sq_ctx.id = id;
rvu->rvu_dbg.nix_sq_ctx.all = all;
break;
case NIX_AQ_CTYPE_RQ:
rvu->rvu_dbg.nix_rq_ctx.lf = nixlf;
rvu->rvu_dbg.nix_rq_ctx.id = id;
rvu->rvu_dbg.nix_rq_ctx.all = all;
break;
default:
return -EINVAL;
}
return 0;
}
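/* Common write handler for the NIX sq_ctx/rq_ctx/cq_ctx debugfs files.
 * Example usage (LF/queue numbers below are hypothetical):
 *   echo "0 all" > .../nix/cq_ctx   - dump all CQ contexts of NIXLF0
 *   echo "0 3"   > .../nix/sq_ctx   - dump only SQ context 3
 */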
static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *ppos,
int ctype)
{
struct seq_file *m = filp->private_data;
struct rvu *rvu = m->private;
char *cmd_buf, *ctype_string;
int nixlf, id = 0, ret;
bool all = false;
if ((*ppos != 0) || !count)
return -EINVAL;
switch (ctype) {
case NIX_AQ_CTYPE_SQ:
ctype_string = "sq";
break;
case NIX_AQ_CTYPE_RQ:
ctype_string = "rq";
break;
case NIX_AQ_CTYPE_CQ:
ctype_string = "cq";
break;
default:
return -EINVAL;
}
cmd_buf = kzalloc(count + 1, GFP_KERNEL);
if (!cmd_buf)
return -ENOMEM;
ret = parse_cmd_buffer_ctx(cmd_buf, &count, buffer,
&nixlf, &id, &all);
if (ret < 0) {
dev_info(rvu->dev,
"Usage: echo <nixlf> [%s number/all] > %s_ctx\n",
ctype_string, ctype_string);
goto done;
} else {
ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
ctype_string);
}
done:
kfree(cmd_buf);
return ret ? ret : count;
}
static ssize_t rvu_dbg_nix_sq_ctx_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *ppos)
{
return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
NIX_AQ_CTYPE_SQ);
}
static int rvu_dbg_nix_sq_ctx_display(struct seq_file *filp, void *unused)
{
return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_SQ);
}
RVU_DEBUG_SEQ_FOPS(nix_sq_ctx, nix_sq_ctx_display, nix_sq_ctx_write);
static ssize_t rvu_dbg_nix_rq_ctx_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *ppos)
{
return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
NIX_AQ_CTYPE_RQ);
}
static int rvu_dbg_nix_rq_ctx_display(struct seq_file *filp, void *unused)
{
return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_RQ);
}
RVU_DEBUG_SEQ_FOPS(nix_rq_ctx, nix_rq_ctx_display, nix_rq_ctx_write);
static ssize_t rvu_dbg_nix_cq_ctx_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *ppos)
{
return rvu_dbg_nix_queue_ctx_write(filp, buffer, count, ppos,
NIX_AQ_CTYPE_CQ);
}
static int rvu_dbg_nix_cq_ctx_display(struct seq_file *filp, void *unused)
{
return rvu_dbg_nix_queue_ctx_display(filp, unused, NIX_AQ_CTYPE_CQ);
}
RVU_DEBUG_SEQ_FOPS(nix_cq_ctx, nix_cq_ctx_display, nix_cq_ctx_write);
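/* Print the context count and enable/disable bitmap of a NIX LF's
 * CQ/RQ/SQ queues.
 */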
static void print_nix_qctx_qsize(struct seq_file *filp, int qsize,
unsigned long *bmap, char *qtype)
{
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return;
bitmap_print_to_pagebuf(false, buf, bmap, qsize);
seq_printf(filp, "%s context count : %d\n", qtype, qsize);
seq_printf(filp, "%s context ena/dis bitmap : %s\n",
qtype, buf);
kfree(buf);
}
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf)
{
if (!pfvf->cq_ctx)
seq_puts(filp, "cq context is not initialized\n");
else
print_nix_qctx_qsize(filp, pfvf->cq_ctx->qsize, pfvf->cq_bmap,
"cq");
if (!pfvf->rq_ctx)
seq_puts(filp, "rq context is not initialized\n");
else
print_nix_qctx_qsize(filp, pfvf->rq_ctx->qsize, pfvf->rq_bmap,
"rq");
if (!pfvf->sq_ctx)
seq_puts(filp, "sq context is not initialized\n");
else
print_nix_qctx_qsize(filp, pfvf->sq_ctx->qsize, pfvf->sq_bmap,
"sq");
}
static ssize_t rvu_dbg_nix_qsize_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *ppos)
{
return rvu_dbg_qsize_write(filp, buffer, count, ppos,
BLKTYPE_NIX);
}
static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
{
return rvu_dbg_qsize_display(filp, unused, BLKTYPE_NIX);
}
RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
static void rvu_dbg_nix_init(struct rvu *rvu)
{
const struct device *dev = &rvu->pdev->dev;
struct dentry *pfile;
rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
if (!rvu->rvu_dbg.nix) {
dev_err(rvu->dev, "create debugfs dir failed for nix\n");
return;
}
pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
&rvu_dbg_nix_sq_ctx_fops);
if (!pfile)
goto create_failed;
pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
&rvu_dbg_nix_rq_ctx_fops);
if (!pfile)
goto create_failed;
pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
&rvu_dbg_nix_cq_ctx_fops);
if (!pfile)
goto create_failed;
pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, rvu,
&rvu_dbg_nix_ndc_tx_cache_fops);
if (!pfile)
goto create_failed;
pfile = debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, rvu,
&rvu_dbg_nix_ndc_rx_cache_fops);
if (!pfile)
goto create_failed;
pfile = debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix,
rvu, &rvu_dbg_nix_ndc_tx_hits_miss_fops);
if (!pfile)
goto create_failed;
pfile = debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix,
rvu, &rvu_dbg_nix_ndc_rx_hits_miss_fops);
if (!pfile)
goto create_failed;
pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
&rvu_dbg_nix_qsize_fops);
if (!pfile)
goto create_failed;
return;
create_failed:
dev_err(dev, "Failed to create debugfs dir/file for NIX\n");
debugfs_remove_recursive(rvu->rvu_dbg.nix);
}
static void rvu_dbg_npa_init(struct rvu *rvu)
{
const struct device *dev = &rvu->pdev->dev;
struct dentry *pfile;
rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
if (!rvu->rvu_dbg.npa)
return;
pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
&rvu_dbg_npa_qsize_fops);
if (!pfile)
goto create_failed;
pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
&rvu_dbg_npa_aura_ctx_fops);
if (!pfile)
goto create_failed;
pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
&rvu_dbg_npa_pool_ctx_fops);
if (!pfile)
goto create_failed;
pfile = debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
&rvu_dbg_npa_ndc_cache_fops);
if (!pfile)
goto create_failed;
pfile = debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa,
rvu, &rvu_dbg_npa_ndc_hits_miss_fops);
if (!pfile)
goto create_failed;
return;
create_failed:
dev_err(dev, "Failed to create debugfs dir/file for NPA\n");
debugfs_remove_recursive(rvu->rvu_dbg.npa);
}
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
({ \
u64 cnt; \
err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
NIX_STATS_RX, &(cnt)); \
if (!err) \
seq_printf(s, "%s: %llu\n", name, cnt); \
cnt; \
})
#define PRINT_CGX_CUML_NIXTX_STATUS(idx, name) \
({ \
u64 cnt; \
err = rvu_cgx_nix_cuml_stats(rvu, cgxd, lmac_id, (idx), \
NIX_STATS_TX, &(cnt)); \
if (!err) \
seq_printf(s, "%s: %llu\n", name, cnt); \
cnt; \
})
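/* Dump link status, NIX RX/TX stats accumulated at CGX port level and
 * raw CGX RX/TX stats of the given LMAC.
 */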
static int cgx_print_stats(struct seq_file *s, int lmac_id)
{
struct cgx_link_user_info linfo;
void *cgxd = s->private;
u64 ucast, mcast, bcast;
int stat = 0, err = 0;
u64 tx_stat, rx_stat;
struct rvu *rvu;
rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
if (!rvu)
return -ENODEV;
/* Link status */
seq_puts(s, "\n=======Link Status======\n\n");
err = cgx_get_link_info(cgxd, lmac_id, &linfo);
if (err)
seq_puts(s, "Failed to read link status\n");
seq_printf(s, "\nLink is %s %d Mbps\n\n",
linfo.link_up ? "UP" : "DOWN", linfo.speed);
/* Rx stats */
seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n");
ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
if (err)
return err;
mcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_MCAST, "rx_mcast_frames");
if (err)
return err;
bcast = PRINT_CGX_CUML_NIXRX_STATUS(RX_BCAST, "rx_bcast_frames");
if (err)
return err;
seq_printf(s, "rx_frames: %llu\n", ucast + mcast + bcast);
PRINT_CGX_CUML_NIXRX_STATUS(RX_OCTS, "rx_bytes");
if (err)
return err;
PRINT_CGX_CUML_NIXRX_STATUS(RX_DROP, "rx_drops");
if (err)
return err;
PRINT_CGX_CUML_NIXRX_STATUS(RX_ERR, "rx_errors");
if (err)
return err;
/* Tx stats */
seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n");
ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
if (err)
return err;
mcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_MCAST, "tx_mcast_frames");
if (err)
return err;
bcast = PRINT_CGX_CUML_NIXTX_STATUS(TX_BCAST, "tx_bcast_frames");
if (err)
return err;
seq_printf(s, "tx_frames: %llu\n", ucast + mcast + bcast);
PRINT_CGX_CUML_NIXTX_STATUS(TX_OCTS, "tx_bytes");
if (err)
return err;
PRINT_CGX_CUML_NIXTX_STATUS(TX_DROP, "tx_drops");
if (err)
return err;
/* Rx stats */
seq_puts(s, "\n=======CGX RX_STATS======\n\n");
while (stat < CGX_RX_STATS_COUNT) {
err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
if (err)
return err;
seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat);
stat++;
}
/* Tx stats */
stat = 0;
seq_puts(s, "\n=======CGX TX_STATS======\n\n");
while (stat < CGX_TX_STATS_COUNT) {
err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
if (err)
return err;
seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat);
stat++;
}
return err;
}
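/* LMAC id is derived from the name of the parent debugfs directory
 * ("lmacX") under which the stats file was created.
 */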
static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
{
struct dentry *current_dir;
int err, lmac_id;
char *buf;
current_dir = filp->file->f_path.dentry->d_parent;
buf = strrchr(current_dir->d_name.name, 'c');
if (!buf)
return -EINVAL;
err = kstrtoint(buf + 1, 10, &lmac_id);
if (!err) {
err = cgx_print_stats(filp, lmac_id);
if (err)
return err;
}
return err;
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
const struct device *dev = &rvu->pdev->dev;
struct dentry *pfile;
int i, lmac_id;
char dname[20];
void *cgx;
rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root);
for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
cgx = rvu_cgx_pdata(i, rvu);
if (!cgx)
continue;
/* cgx debugfs dir */
sprintf(dname, "cgx%d", i);
rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
rvu->rvu_dbg.cgx_root);
for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) {
/* lmac debugfs dir */
sprintf(dname, "lmac%d", lmac_id);
rvu->rvu_dbg.lmac =
debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
pfile = debugfs_create_file("stats", 0600,
rvu->rvu_dbg.lmac, cgx,
&rvu_dbg_cgx_stat_fops);
if (!pfile)
goto create_failed;
}
}
return;
create_failed:
dev_err(dev, "Failed to create debugfs dir/file for CGX\n");
debugfs_remove_recursive(rvu->rvu_dbg.cgx_root);
}
/* NPC debugfs APIs */
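/* Print MCAM entry and counter allocation info of a single PF/VF */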
static void rvu_print_npc_mcam_info(struct seq_file *s,
u16 pcifunc, int blkaddr)
{
struct rvu *rvu = s->private;
int entry_acnt, entry_ecnt;
int cntr_acnt, cntr_ecnt;
/* Skip PF0 */
if (!pcifunc)
return;
rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr,
&entry_acnt, &entry_ecnt);
rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr,
&cntr_acnt, &cntr_ecnt);
if (!entry_acnt && !cntr_acnt)
return;
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
seq_printf(s, "\n\t\t Device \t\t: PF%d\n",
rvu_get_pf(pcifunc));
else
seq_printf(s, "\n\t\t Device \t\t: PF%d VF%d\n",
rvu_get_pf(pcifunc),
(pcifunc & RVU_PFVF_FUNC_MASK) - 1);
if (entry_acnt) {
seq_printf(s, "\t\t Entries allocated \t: %d\n", entry_acnt);
seq_printf(s, "\t\t Entries enabled \t: %d\n", entry_ecnt);
}
if (cntr_acnt) {
seq_printf(s, "\t\t Counters allocated \t: %d\n", cntr_acnt);
seq_printf(s, "\t\t Counters enabled \t: %d\n", cntr_ecnt);
}
}
static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unused)
{
struct rvu *rvu = filp->private;
int pf, vf, numvfs, blkaddr;
struct npc_mcam *mcam;
u16 pcifunc;
u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return -ENODEV;
mcam = &rvu->hw->mcam;
seq_puts(filp, "\nNPC MCAM info:\n");
/* MCAM keywidth on receive and transmit sides */
cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
cfg = (cfg >> 32) & 0x07;
seq_printf(filp, "\t\t RX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
"112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
"224bits" : "448bits"));
cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX));
cfg = (cfg >> 32) & 0x07;
seq_printf(filp, "\t\t TX keywidth \t: %s\n", (cfg == NPC_MCAM_KEY_X1) ?
"112bits" : ((cfg == NPC_MCAM_KEY_X2) ?
"224bits" : "448bits"));
mutex_lock(&mcam->lock);
/* MCAM entries */
seq_printf(filp, "\n\t\t MCAM entries \t: %d\n", mcam->total_entries);
seq_printf(filp, "\t\t Reserved \t: %d\n",
mcam->total_entries - mcam->bmap_entries);
seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
/* MCAM counters */
cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
cfg = (cfg >> 48) & 0xFFFF;
seq_printf(filp, "\n\t\t MCAM counters \t: %lld\n", cfg);
seq_printf(filp, "\t\t Reserved \t: %lld\n", cfg - mcam->counters.max);
seq_printf(filp, "\t\t Available \t: %d\n",
rvu_rsrc_free_count(&mcam->counters));
if (mcam->bmap_entries == mcam->bmap_fcnt) {
mutex_unlock(&mcam->lock);
return 0;
}
seq_puts(filp, "\n\t\t Current allocation\n");
seq_puts(filp, "\t\t====================\n");
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
pcifunc = (pf << RVU_PFVF_PF_SHIFT);
rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
numvfs = (cfg >> 12) & 0xFF;
for (vf = 0; vf < numvfs; vf++) {
pcifunc = (pf << RVU_PFVF_PF_SHIFT) | (vf + 1);
rvu_print_npc_mcam_info(filp, pcifunc, blkaddr);
}
}
mutex_unlock(&mcam->lock);
return 0;
}
RVU_DEBUG_SEQ_FOPS(npc_mcam_info, npc_mcam_info_display, NULL);
static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
void *unused)
{
struct rvu *rvu = filp->private;
struct npc_mcam *mcam;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return -ENODEV;
mcam = &rvu->hw->mcam;
seq_puts(filp, "\nNPC MCAM RX miss action stats\n");
seq_printf(filp, "\t\tStat %d: \t%lld\n", mcam->rx_miss_act_cntr,
rvu_read64(rvu, blkaddr,
NPC_AF_MATCH_STATX(mcam->rx_miss_act_cntr)));
return 0;
}
RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
static void rvu_dbg_npc_init(struct rvu *rvu)
{
const struct device *dev = &rvu->pdev->dev;
struct dentry *pfile;
rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
if (!rvu->rvu_dbg.npc)
return;
pfile = debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc,
rvu, &rvu_dbg_npc_mcam_info_fops);
if (!pfile)
goto create_failed;
pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc,
rvu, &rvu_dbg_npc_rx_miss_act_fops);
if (!pfile)
goto create_failed;
return;
create_failed:
dev_err(dev, "Failed to create debugfs dir/file for NPC\n");
debugfs_remove_recursive(rvu->rvu_dbg.npc);
}
void rvu_dbg_init(struct rvu *rvu)
{
struct device *dev = &rvu->pdev->dev;
struct dentry *pfile;
rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
if (!rvu->rvu_dbg.root) {
dev_err(rvu->dev, "%s failed\n", __func__);
return;
}
pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
&rvu_dbg_rsrc_status_fops);
if (!pfile)
goto create_failed;
rvu_dbg_npa_init(rvu);
rvu_dbg_nix_init(rvu);
rvu_dbg_cgx_init(rvu);
rvu_dbg_npc_init(rvu);
return;
create_failed:
dev_err(dev, "Failed to create debugfs dir\n");
debugfs_remove_recursive(rvu->rvu_dbg.root);
}
void rvu_dbg_exit(struct rvu *rvu)
{
debugfs_remove_recursive(rvu->rvu_dbg.root);
}
#endif /* CONFIG_DEBUG_FS */
......@@ -64,7 +64,6 @@ enum nix_makr_fmt_indexes {
struct mce {
struct hlist_node node;
u16 idx;
u16 pcifunc;
};
......@@ -127,17 +126,12 @@ static void nix_rx_sync(struct rvu *rvu, int blkaddr)
err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
if (err)
dev_err(rvu->dev, "NIX RX software sync failed\n");
/* As per a HW errata in 9xxx A0 silicon, HW may clear SW_SYNC[ENA]
* bit too early. Hence wait for 50us more.
*/
if (is_rvu_9xxx_A0(rvu))
usleep_range(50, 60);
}
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
int lvl, u16 pcifunc, u16 schq)
{
struct rvu_hwinfo *hw = rvu->hw;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u16 map_func;
......@@ -155,13 +149,15 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
mutex_unlock(&rvu->rsrc_lock);
/* For TL1 schq, sharing across VF's of same PF is ok */
if (lvl == NIX_TXSCH_LVL_TL1 &&
rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
return false;
/* TLs aggregating traffic are shared across PF and VFs */
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
return false;
else
return true;
}
if (lvl != NIX_TXSCH_LVL_TL1 &&
map_func != pcifunc)
if (map_func != pcifunc)
return false;
return true;
......@@ -198,6 +194,11 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
/* Note that AF's VFs work in pairs and talk over consecutive
* loopback channels. Therefore, if an odd number of AF VFs is
* enabled, the last VF remains with no pair.
*/
pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
NIX_CHAN_LBK_CHX(0, vf + 1);
......@@ -382,7 +383,8 @@ static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
struct rvu_pfvf *pfvf, int nixlf,
int rss_sz, int rss_grps, int hwctx_size)
int rss_sz, int rss_grps, int hwctx_size,
u64 way_mask)
{
int err, grp, num_indices;
......@@ -402,7 +404,8 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr,
/* Config full RSS table size, enable RSS and caching */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf),
BIT_ULL(36) | BIT_ULL(4) |
ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE));
ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) |
way_mask << 20);
/* Config RSS group offset and sizes */
for (grp = 0; grp < rss_grps; grp++)
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp),
......@@ -663,6 +666,21 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
return 0;
}
static const char *nix_get_ctx_name(int ctype)
{
switch (ctype) {
case NIX_AQ_CTYPE_CQ:
return "CQ";
case NIX_AQ_CTYPE_SQ:
return "SQ";
case NIX_AQ_CTYPE_RQ:
return "RQ";
case NIX_AQ_CTYPE_RSS:
return "RSS";
}
return "";
}
static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
......@@ -707,21 +725,60 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
if (rc) {
err = rc;
dev_err(rvu->dev, "Failed to disable %s:%d context\n",
(req->ctype == NIX_AQ_CTYPE_CQ) ?
"CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
"RQ" : "SQ"), qidx);
nix_get_ctx_name(req->ctype), qidx);
}
}
return err;
}
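/* When dynamic NDC caching is disabled, newly initialized NIX
 * CQ/SQ/RQ/RSS contexts are locked down by issuing an AQ LOCK op.
 */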
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
{
struct nix_aq_enq_req lock_ctx_req;
int err;
if (req->op != NIX_AQ_INSTOP_INIT)
return 0;
if (req->ctype == NIX_AQ_CTYPE_MCE ||
req->ctype == NIX_AQ_CTYPE_DYNO)
return 0;
memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
lock_ctx_req.ctype = req->ctype;
lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
lock_ctx_req.qidx = req->qidx;
err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
if (err)
dev_err(rvu->dev,
"PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
req->hdr.pcifunc,
nix_get_ctx_name(req->ctype), req->qidx);
return err;
}
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
int err;
err = rvu_nix_aq_enq_inst(rvu, req, rsp);
if (!err)
err = nix_lf_hwctx_lockdown(rvu, req);
return err;
}
#else
int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
......@@ -745,6 +802,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
if (!req->rq_cnt || !req->sq_cnt || !req->cq_cnt)
return NIX_AF_ERR_PARAM;
if (req->way_mask)
req->way_mask &= 0xFFFF;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (!pfvf->nixlf || blkaddr < 0)
......@@ -810,7 +870,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
(u64)pfvf->rq_ctx->iova);
/* Set caching and queue count in HW */
cfg = BIT_ULL(36) | (req->rq_cnt - 1);
cfg = BIT_ULL(36) | (req->rq_cnt - 1) | req->way_mask << 20;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RQS_CFG(nixlf), cfg);
/* Alloc NIX SQ HW context memory and config the base */
......@@ -825,7 +885,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_BASE(nixlf),
(u64)pfvf->sq_ctx->iova);
cfg = BIT_ULL(36) | (req->sq_cnt - 1);
cfg = BIT_ULL(36) | (req->sq_cnt - 1) | req->way_mask << 20;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_SQS_CFG(nixlf), cfg);
/* Alloc NIX CQ HW context memory and config the base */
......@@ -840,13 +901,14 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_BASE(nixlf),
(u64)pfvf->cq_ctx->iova);
cfg = BIT_ULL(36) | (req->cq_cnt - 1);
cfg = BIT_ULL(36) | (req->cq_cnt - 1) | req->way_mask << 20;
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CQS_CFG(nixlf), cfg);
/* Initialize receive side scaling (RSS) */
hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF);
err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf,
req->rss_sz, req->rss_grps, hwctx_size);
err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz,
req->rss_grps, hwctx_size, req->way_mask);
if (err)
goto free_mem;
......@@ -860,7 +922,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
(u64)pfvf->cq_ints_ctx->iova);
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf), BIT_ULL(36));
rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_CFG(nixlf),
BIT_ULL(36) | req->way_mask << 20);
/* Alloc memory for QINT's HW contexts */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
......@@ -872,7 +936,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
(u64)pfvf->nix_qints_ctx->iova);
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf),
BIT_ULL(36) | req->way_mask << 20);
/* Setup VLANX TPID's.
* Use VLAN1 for 802.1Q
......@@ -1048,6 +1113,9 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
struct rvu_hwinfo *hw = rvu->hw;
int link;
if (lvl >= hw->cap.nix_tx_aggr_lvl)
return;
/* Reset TL4's SDP link config */
if (lvl == NIX_TXSCH_LVL_TL4)
rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
......@@ -1061,83 +1129,185 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
static int
rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc,
u16 *schq_list, u16 *schq_cnt)
static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc)
{
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
u16 schq_base;
u32 *pfvf_map;
int pf, intf;
struct rvu_hwinfo *hw = rvu->hw;
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -ENODEV;
if (is_afvf(pcifunc)) {/* LBK links */
return hw->cgx_links;
} else if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return (cgx_id * hw->lmac_per_cgx) + lmac_id;
}
pfvf = rvu_get_pfvf(rvu, pcifunc);
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
pfvf_map = txsch->pfvf_map;
pf = rvu_get_pf(pcifunc);
/* SDP link */
return hw->cgx_links + hw->lbk_links;
}
/* static allocation as two TL1's per link */
intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
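/* Get the TX scheduler queue index range [start, end) associated with
 * the given transmit link (LBK, CGX or SDP).
 */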
static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc,
int link, int *start, int *end)
{
struct rvu_hwinfo *hw = rvu->hw;
int pf = rvu_get_pf(pcifunc);
switch (intf) {
case NIX_INTF_TYPE_CGX:
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
break;
case NIX_INTF_TYPE_LBK:
schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2;
break;
default:
return -ENODEV;
if (is_afvf(pcifunc)) { /* LBK links */
*start = hw->cap.nix_txsch_per_cgx_lmac * link;
*end = *start + hw->cap.nix_txsch_per_lbk_lmac;
} else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */
*start = hw->cap.nix_txsch_per_cgx_lmac * link;
*end = *start + hw->cap.nix_txsch_per_cgx_lmac;
} else { /* SDP link */
*start = (hw->cap.nix_txsch_per_cgx_lmac * hw->cgx_links) +
(hw->cap.nix_txsch_per_lbk_lmac * hw->lbk_links);
*end = *start + hw->cap.nix_txsch_per_sdp_lmac;
}
}
if (schq_base + 1 > txsch->schq.max)
return -ENODEV;
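/* Validate a TX scheduler queue alloc request against HW capabilities
 * (fixed mapping, aggregation level) and the free queue count on the
 * PF_FUNC's transmit link.
 */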
static int nix_check_txschq_alloc_req(struct rvu *rvu, int lvl, u16 pcifunc,
struct nix_hw *nix_hw,
struct nix_txsch_alloc_req *req)
{
struct rvu_hwinfo *hw = rvu->hw;
int schq, req_schq, free_cnt;
struct nix_txsch *txsch;
int link, start, end;
/* init pfvf_map as we store flags */
if (pfvf_map[schq_base] == U32_MAX) {
pfvf_map[schq_base] =
TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
pfvf_map[schq_base + 1] =
TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
txsch = &nix_hw->txsch[lvl];
req_schq = req->schq_contig[lvl] + req->schq[lvl];
/* Onetime reset for TL1 */
nix_reset_tx_linkcfg(rvu, blkaddr,
NIX_TXSCH_LVL_TL1, schq_base);
nix_reset_tx_shaping(rvu, blkaddr,
NIX_TXSCH_LVL_TL1, schq_base);
if (!req_schq)
return 0;
nix_reset_tx_linkcfg(rvu, blkaddr,
NIX_TXSCH_LVL_TL1, schq_base + 1);
nix_reset_tx_shaping(rvu, blkaddr,
NIX_TXSCH_LVL_TL1, schq_base + 1);
link = nix_get_tx_link(rvu, pcifunc);
/* For traffic aggregating scheduler level, one queue is enough */
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
if (req_schq != 1)
return NIX_AF_ERR_TLX_ALLOC_FAIL;
return 0;
}
if (schq_list && schq_cnt) {
schq_list[0] = schq_base;
schq_list[1] = schq_base + 1;
*schq_cnt = 2;
/* Get free SCHQ count and check if request can be accommodated */
if (hw->cap.nix_fixed_txschq_mapping) {
nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
schq = start + (pcifunc & RVU_PFVF_FUNC_MASK);
if (end <= txsch->schq.max && schq < end &&
!test_bit(schq, txsch->schq.bmap))
free_cnt = 1;
else
free_cnt = 0;
} else {
free_cnt = rvu_rsrc_free_count(&txsch->schq);
}
if (free_cnt < req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
return NIX_AF_ERR_TLX_ALLOC_FAIL;
/* If contiguous queues are needed, check for availability */
if (!hw->cap.nix_fixed_txschq_mapping && req->schq_contig[lvl] &&
!rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
return NIX_AF_ERR_TLX_ALLOC_FAIL;
return 0;
}
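/* Allocate the requested scheduler queues within [start, end) of the
 * PF_FUNC's transmit link, honouring the fixed per-PF_FUNC mapping
 * when the silicon requires it.
 */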
static void nix_txsch_alloc(struct rvu *rvu, struct nix_txsch *txsch,
struct nix_txsch_alloc_rsp *rsp,
int lvl, int start, int end)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = rsp->hdr.pcifunc;
int idx, schq;
/* For traffic aggregating levels, queue alloc is based
* on the transmit link to which the PF_FUNC is mapped.
*/
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
/* A single TL queue is allocated */
if (rsp->schq_contig[lvl]) {
rsp->schq_contig[lvl] = 1;
rsp->schq_contig_list[lvl][0] = start;
}
/* Both contig and non-contig reqs don't make sense here */
if (rsp->schq_contig[lvl])
rsp->schq[lvl] = 0;
if (rsp->schq[lvl]) {
rsp->schq[lvl] = 1;
rsp->schq_list[lvl][0] = start;
}
return;
}
/* Adjust the queue request count if HW supports
* only a one-queue-per-level configuration.
*/
if (hw->cap.nix_fixed_txschq_mapping) {
idx = pcifunc & RVU_PFVF_FUNC_MASK;
schq = start + idx;
if (idx >= (end - start) || test_bit(schq, txsch->schq.bmap)) {
rsp->schq_contig[lvl] = 0;
rsp->schq[lvl] = 0;
return;
}
if (rsp->schq_contig[lvl]) {
rsp->schq_contig[lvl] = 1;
set_bit(schq, txsch->schq.bmap);
rsp->schq_contig_list[lvl][0] = schq;
rsp->schq[lvl] = 0;
} else if (rsp->schq[lvl]) {
rsp->schq[lvl] = 1;
set_bit(schq, txsch->schq.bmap);
rsp->schq_list[lvl][0] = schq;
}
return;
}
/* Allocate the requested contiguous queue indices first */
if (rsp->schq_contig[lvl]) {
schq = bitmap_find_next_zero_area(txsch->schq.bmap,
txsch->schq.max, start,
rsp->schq_contig[lvl], 0);
if (schq >= end)
rsp->schq_contig[lvl] = 0;
for (idx = 0; idx < rsp->schq_contig[lvl]; idx++) {
set_bit(schq, txsch->schq.bmap);
rsp->schq_contig_list[lvl][idx] = schq;
schq++;
}
}
/* Allocate non-contiguous queue indices */
if (rsp->schq[lvl]) {
idx = 0;
for (schq = start; schq < end; schq++) {
if (!test_bit(schq, txsch->schq.bmap)) {
set_bit(schq, txsch->schq.bmap);
rsp->schq_list[lvl][idx++] = schq;
}
if (idx == rsp->schq[lvl])
break;
}
/* Update how many were allocated */
rsp->schq[lvl] = idx;
}
}
int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
struct nix_txsch_alloc_req *req,
struct nix_txsch_alloc_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int link, blkaddr, rc = 0;
int lvl, idx, start, end;
struct nix_txsch *txsch;
int lvl, idx, req_schq;
struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
int blkaddr, rc = 0;
u32 *pfvf_map;
u16 schq;
......@@ -1151,83 +1321,66 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
return -EINVAL;
mutex_lock(&rvu->rsrc_lock);
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
req_schq = req->schq_contig[lvl] + req->schq[lvl];
pfvf_map = txsch->pfvf_map;
if (!req_schq)
continue;
/* There are only 28 TL1s */
if (lvl == NIX_TXSCH_LVL_TL1) {
if (req->schq_contig[lvl] ||
req->schq[lvl] > 2 ||
rvu_get_tl1_schqs(rvu, blkaddr,
pcifunc, NULL, NULL))
goto err;
continue;
}
/* Check if request is valid */
if (req_schq > MAX_TXSCHQ_PER_FUNC)
goto err;
/* If contiguous queues are needed, check for availability */
if (req->schq_contig[lvl] &&
!rvu_rsrc_check_contig(&txsch->schq, req->schq_contig[lvl]))
goto err;
/* Check if full request can be accommodated */
if (req_schq >= rvu_rsrc_free_count(&txsch->schq))
/* Check if request is valid as per HW capabilities
* and can be accommodated.
*/
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
rc = nix_check_txschq_alloc_req(rvu, lvl, pcifunc, nix_hw, req);
if (rc)
goto err;
}
/* Allocate requested Tx scheduler queues */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
rsp->schq_contig[lvl] = req->schq_contig[lvl];
pfvf_map = txsch->pfvf_map;
rsp->schq[lvl] = req->schq[lvl];
if (!req->schq[lvl] && !req->schq_contig[lvl])
continue;
/* Handle TL1 specially as its
* allocation is restricted to 2 TL1's
* per link
*/
rsp->schq[lvl] = req->schq[lvl];
rsp->schq_contig[lvl] = req->schq_contig[lvl];
if (lvl == NIX_TXSCH_LVL_TL1) {
rsp->schq_contig[lvl] = 0;
rvu_get_tl1_schqs(rvu, blkaddr, pcifunc,
&rsp->schq_list[lvl][0],
&rsp->schq[lvl]);
continue;
link = nix_get_tx_link(rvu, pcifunc);
if (lvl >= hw->cap.nix_tx_aggr_lvl) {
start = link;
end = link;
} else if (hw->cap.nix_fixed_txschq_mapping) {
nix_get_txschq_range(rvu, pcifunc, link, &start, &end);
} else {
start = 0;
end = txsch->schq.max;
}
/* Alloc contiguous queues first */
if (req->schq_contig[lvl]) {
schq = rvu_alloc_rsrc_contig(&txsch->schq,
req->schq_contig[lvl]);
nix_txsch_alloc(rvu, txsch, rsp, lvl, start, end);
for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
/* Reset queue config */
for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
schq = rsp->schq_contig_list[lvl][idx];
if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
NIX_TXSCHQ_CFG_DONE))
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
rsp->schq_contig_list[lvl][idx] = schq;
schq++;
}
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
}
/* Alloc non-contiguous queues */
for (idx = 0; idx < req->schq[lvl]; idx++) {
schq = rvu_alloc_rsrc(&txsch->schq);
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
schq = rsp->schq_list[lvl][idx];
if (!(TXSCH_MAP_FLAGS(pfvf_map[schq]) &
NIX_TXSCHQ_CFG_DONE))
pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
rsp->schq_list[lvl][idx] = schq;
}
}
rsp->aggr_level = hw->cap.nix_tx_aggr_lvl;
rsp->aggr_lvl_rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
rsp->link_cfg_lvl = rvu_read64(rvu, blkaddr,
NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
goto exit;
err:
rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
......@@ -1236,13 +1389,50 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
return rc;
}
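/* Flush an SMQ: temporarily enable CGX LMAC TX if it was disabled,
 * drop link backpressure so the flush does not stall, set the flush
 * and enqueue-xoff bits and poll for completion.
 */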
static void nix_smq_flush(struct rvu *rvu, int blkaddr,
int smq, u16 pcifunc, int nixlf)
{
int pf = rvu_get_pf(pcifunc);
u8 cgx_id = 0, lmac_id = 0;
int err, restore_tx_en = 0;
u64 cfg;
/* enable cgx tx if disabled */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, true);
}
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
/* Do SMQ flush and set enqueue xoff */
cfg |= BIT_ULL(50) | BIT_ULL(49);
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
/* Disable backpressure from physical link,
* otherwise SMQ flush may stall.
*/
rvu_cgx_enadis_rx_bp(rvu, pf, false);
/* Wait for flush to complete */
err = rvu_poll_reg(rvu, blkaddr,
NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
if (err)
dev_err(rvu->dev,
"NIXLF%d: SMQ%d flush failed\n", nixlf, smq);
rvu_cgx_enadis_rx_bp(rvu, pf, true);
/* restore cgx tx state */
if (restore_tx_en)
cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
}
static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
{
int blkaddr, nixlf, lvl, schq, err;
struct rvu_hwinfo *hw = rvu->hw;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
......@@ -1275,26 +1465,15 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
for (schq = 0; schq < txsch->schq.max; schq++) {
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
/* Do SMQ flush and set enqueue xoff */
cfg |= BIT_ULL(50) | BIT_ULL(49);
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
/* Wait for flush to complete */
err = rvu_poll_reg(rvu, blkaddr,
NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
if (err) {
dev_err(rvu->dev,
"NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
}
nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
}
/* Now free scheduler queues to free pool */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
/* Free all SCHQ's except TL1 as
* TL1 is shared across all VF's for a RVU PF
*/
if (lvl == NIX_TXSCH_LVL_TL1)
/* TLs above aggregation level are shared across the PF
* and all its VFs, hence skip freeing them.
*/
if (lvl >= hw->cap.nix_tx_aggr_lvl)
continue;
txsch = &nix_hw->txsch[lvl];
......@@ -1302,7 +1481,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
rvu_free_rsrc(&txsch->schq, schq);
txsch->pfvf_map[schq] = 0;
txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
}
}
mutex_unlock(&rvu->rsrc_lock);
......@@ -1319,13 +1498,12 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
static int nix_txschq_free_one(struct rvu *rvu,
struct nix_txsch_free_req *req)
{
int lvl, schq, nixlf, blkaddr, rc;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
int lvl, schq, nixlf, blkaddr;
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
u32 *pfvf_map;
u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
......@@ -1343,10 +1521,8 @@ static int nix_txschq_free_one(struct rvu *rvu,
schq = req->schq;
txsch = &nix_hw->txsch[lvl];
/* Don't allow freeing TL1 */
if (lvl > NIX_TXSCH_LVL_TL2 ||
schq >= txsch->schq.max)
goto err;
if (lvl >= hw->cap.nix_tx_aggr_lvl || schq >= txsch->schq.max)
return 0;
pfvf_map = txsch->pfvf_map;
mutex_lock(&rvu->rsrc_lock);
......@@ -1359,24 +1535,12 @@ static int nix_txschq_free_one(struct rvu *rvu,
/* Flush if it is an SMQ. Onus of disabling
* TL2/3 queue links before SMQ flush is on the user.
*/
if (lvl == NIX_TXSCH_LVL_SMQ) {
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
/* Do SMQ flush and set enqueue xoff */
cfg |= BIT_ULL(50) | BIT_ULL(49);
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
/* Wait for flush to complete */
rc = rvu_poll_reg(rvu, blkaddr,
NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
if (rc) {
dev_err(rvu->dev,
"NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
}
}
if (lvl == NIX_TXSCH_LVL_SMQ)
nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
/* Free the resource */
rvu_free_rsrc(&txsch->schq, schq);
txsch->pfvf_map[schq] = 0;
txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
mutex_unlock(&rvu->rsrc_lock);
return 0;
err:
......@@ -1393,8 +1557,8 @@ int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
return nix_txschq_free_one(rvu, req);
}
static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
int lvl, u64 reg, u64 regval)
static bool is_txschq_hierarchy_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
int lvl, u64 reg, u64 regval)
{
u64 regbase = reg & 0xFFFF;
u16 schq, parent;
......@@ -1431,79 +1595,82 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
return true;
}
static int
nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc)
static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg)
{
u16 schq_list[2], schq_cnt, schq;
int blkaddr, idx, err = 0;
u16 map_func, map_flags;
struct nix_hw *nix_hw;
u64 reg, regval;
u32 *pfvf_map;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
u64 regbase;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return -EINVAL;
pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
mutex_lock(&rvu->rsrc_lock);
err = rvu_get_tl1_schqs(rvu, blkaddr,
pcifunc, schq_list, &schq_cnt);
if (err)
goto unlock;
if (hw->cap.nix_shaping)
return true;
for (idx = 0; idx < schq_cnt; idx++) {
schq = schq_list[idx];
map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
/* If shaping and coloring are not supported, then
* *_CIR and *_PIR registers should not be configured.
*/
regbase = reg & 0xFFFF;
/* check if config is already done or this is pf */
if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE)
continue;
switch (lvl) {
case NIX_TXSCH_LVL_TL1:
if (regbase == NIX_AF_TL1X_CIR(0))
return false;
break;
case NIX_TXSCH_LVL_TL2:
if (regbase == NIX_AF_TL2X_CIR(0) ||
regbase == NIX_AF_TL2X_PIR(0))
return false;
break;
case NIX_TXSCH_LVL_TL3:
if (regbase == NIX_AF_TL3X_CIR(0) ||
regbase == NIX_AF_TL3X_PIR(0))
return false;
break;
case NIX_TXSCH_LVL_TL4:
if (regbase == NIX_AF_TL4X_CIR(0) ||
regbase == NIX_AF_TL4X_PIR(0))
return false;
break;
}
return true;
}
/* default configuration */
reg = NIX_AF_TL1X_TOPOLOGY(schq);
regval = (TXSCH_TL1_DFLT_RR_PRIO << 1);
rvu_write64(rvu, blkaddr, reg, regval);
reg = NIX_AF_TL1X_SCHEDULE(schq);
regval = TXSCH_TL1_DFLT_RR_QTM;
rvu_write64(rvu, blkaddr, reg, regval);
reg = NIX_AF_TL1X_CIR(schq);
regval = 0;
rvu_write64(rvu, blkaddr, reg, regval);
static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw,
u16 pcifunc, int blkaddr)
{
u32 *pfvf_map;
int schq;
map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
}
unlock:
mutex_unlock(&rvu->rsrc_lock);
return err;
schq = nix_get_tx_link(rvu, pcifunc);
pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
/* Skip if PF has already done the config */
if (TXSCH_MAP_FLAGS(pfvf_map[schq]) & NIX_TXSCHQ_CFG_DONE)
return;
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq),
(TXSCH_TL1_DFLT_RR_PRIO << 1));
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq),
TXSCH_TL1_DFLT_RR_QTM);
rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00);
pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE);
}
int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp)
{
u16 schq, pcifunc = req->hdr.pcifunc;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
u64 reg, regval, schq_regbase;
struct nix_txsch *txsch;
u16 map_func, map_flags;
struct nix_hw *nix_hw;
int blkaddr, idx, err;
int nixlf, schq;
u32 *pfvf_map;
int nixlf;
if (req->lvl >= NIX_TXSCH_LVL_CNT ||
req->num_regs > MAX_REGS_PER_MBOX_MSG)
return NIX_AF_INVAL_TXSCHQ_CFG;
err = nix_get_nixlf(rvu, pcifunc, &nixlf);
if (err)
return NIX_AF_ERR_AF_LF_INVALID;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
......@@ -1512,19 +1679,16 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
if (!nix_hw)
return -EINVAL;
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
txsch = &nix_hw->txsch[req->lvl];
pfvf_map = txsch->pfvf_map;
/* VF is only allowed to trigger
* setting default cfg on TL1
*/
if (pcifunc & RVU_PFVF_FUNC_MASK &&
req->lvl == NIX_TXSCH_LVL_TL1) {
return nix_tl1_default_cfg(rvu, pcifunc);
if (req->lvl >= hw->cap.nix_tx_aggr_lvl &&
pcifunc & RVU_PFVF_FUNC_MASK) {
mutex_lock(&rvu->rsrc_lock);
if (req->lvl == NIX_TXSCH_LVL_TL1)
nix_tl1_default_cfg(rvu, nix_hw, pcifunc, blkaddr);
mutex_unlock(&rvu->rsrc_lock);
return 0;
}
for (idx = 0; idx < req->num_regs; idx++) {
......@@ -1532,10 +1696,14 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
regval = req->regval[idx];
schq_regbase = reg & 0xFFFF;
if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
txsch->lvl, reg, regval))
if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr,
txsch->lvl, reg, regval))
return NIX_AF_INVAL_TXSCHQ_CFG;
/* Check if shaping and coloring are supported */
if (!is_txschq_shaping_valid(hw, req->lvl, reg))
continue;
/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
......@@ -1544,32 +1712,36 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
regval |= ((u64)nixlf << 24);
}
/* Clear 'BP_ENA' config, if it's not allowed */
if (!hw->cap.nix_tx_link_bp) {
if (schq_regbase == NIX_AF_TL4X_SDP_LINK_CFG(0) ||
(schq_regbase & 0xFF00) ==
NIX_AF_TL3_TL2X_LINKX_CFG(0, 0))
regval &= ~BIT_ULL(13);
}
/* Mark config as done for TL1 by PF */
if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
mutex_lock(&rvu->rsrc_lock);
map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq],
NIX_TXSCHQ_CFG_DONE);
mutex_unlock(&rvu->rsrc_lock);
}
rvu_write64(rvu, blkaddr, reg, regval);
/* Check for SMQ flush, if so, poll for its completion */
/* SMQ flush is special, hence split the register write such
* that the flush is done first and the rest of the bits are written later.
*/
if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
(regval & BIT_ULL(49))) {
err = rvu_poll_reg(rvu, blkaddr,
reg, BIT_ULL(49), true);
if (err)
return NIX_AF_SMQ_FLUSH_FAILED;
schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf);
regval &= ~BIT_ULL(49);
}
rvu_write64(rvu, blkaddr, reg, regval);
}
return 0;
}
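The handler above treats NIX_AF_SMQX_CFG specially: when the flush bit (bit 49) is requested, the flush is performed and polled first, and only then is the remainder of the value written. A compact sketch of that split write, with hypothetical sk_write_reg()/sk_smq_flush() helpers standing in for rvu_write64()/nix_smq_flush():
/* Sketch: split an SMQ config write so the flush is handled first.
 * sk_write_reg() and sk_smq_flush() are hypothetical stand-ins for
 * rvu_write64() and nix_smq_flush().
 */
#include <stdint.h>
#include <stdio.h>

#define SK_SMQ_FLUSH_BIT (1ULL << 49)

static void sk_write_reg(uint64_t reg, uint64_t val)
{
	printf("write reg 0x%llx = 0x%llx\n",
	       (unsigned long long)reg, (unsigned long long)val);
}

static void sk_smq_flush(int schq)
{
	printf("flush SMQ %d and poll for completion\n", schq);
}

static void sk_smq_cfg_write(uint64_t reg, uint64_t regval, int schq)
{
	if (regval & SK_SMQ_FLUSH_BIT) {
		sk_smq_flush(schq);           /* flush and wait first */
		regval &= ~SK_SMQ_FLUSH_BIT;  /* then write the remaining bits */
	}
	sk_write_reg(reg, regval);
}

int main(void)
{
	sk_smq_cfg_write(0x700, SK_SMQ_FLUSH_BIT | 0x50, 3);
	return 0;
}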
......@@ -1650,7 +1822,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
}
static int nix_update_mce_list(struct nix_mce_list *mce_list,
u16 pcifunc, int idx, bool add)
u16 pcifunc, bool add)
{
struct mce *mce, *tail = NULL;
bool delete = false;
......@@ -1679,7 +1851,6 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
mce = kzalloc(sizeof(*mce), GFP_KERNEL);
if (!mce)
return -ENOMEM;
mce->idx = idx;
mce->pcifunc = pcifunc;
if (!tail)
hlist_add_head(&mce->node, &mce_list->head);
......@@ -1691,12 +1862,12 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
{
int err = 0, idx, next_idx, count;
int err = 0, idx, next_idx, last_idx;
struct nix_mce_list *mce_list;
struct mce *mce, *next_mce;
struct nix_mcast *mcast;
struct nix_hw *nix_hw;
struct rvu_pfvf *pfvf;
struct mce *mce;
int blkaddr;
/* Broadcast pkt replication is not needed for AF's VFs, hence skip */
......@@ -1728,31 +1899,31 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
mutex_lock(&mcast->mce_lock);
err = nix_update_mce_list(mce_list, pcifunc, idx, add);
err = nix_update_mce_list(mce_list, pcifunc, add);
if (err)
goto end;
/* Disable MCAM entry in NPC */
if (!mce_list->count)
if (!mce_list->count) {
rvu_npc_disable_bcast_entry(rvu, pcifunc);
goto end;
count = mce_list->count;
}
/* Dump the updated list to HW */
idx = pfvf->bcast_mce_idx;
last_idx = idx + mce_list->count - 1;
hlist_for_each_entry(mce, &mce_list->head, node) {
next_idx = 0;
count--;
if (count) {
next_mce = hlist_entry(mce->node.next,
struct mce, node);
next_idx = next_mce->idx;
}
if (idx > last_idx)
break;
next_idx = idx + 1;
/* EOL should be set in last MCE */
err = nix_setup_mce(rvu, mce->idx,
NIX_AQ_INSTOP_WRITE, mce->pcifunc,
next_idx, count ? false : true);
err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
mce->pcifunc, next_idx,
(next_idx > last_idx) ? true : false);
if (err)
goto end;
idx++;
}
end:
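The reworked list dump no longer stores an index per MCE node; instead it programs the hardware entries at contiguous indices starting from the PF's bcast_mce_idx, chaining each one to idx + 1 and setting end-of-list only on the last entry. A small model of that chaining, with a hypothetical sk_setup_mce() in place of the AQ write:
/* Sketch: program a broadcast replication list at contiguous MCE indices.
 * sk_setup_mce() is a hypothetical stand-in for the AQ write done by
 * nix_setup_mce().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void sk_setup_mce(int idx, uint16_t pcifunc, int next_idx, bool eol)
{
	printf("MCE[%d]: pcifunc=0x%x next=%d eol=%d\n",
	       idx, (unsigned)pcifunc, next_idx, (int)eol);
}

static void sk_dump_mce_list(const uint16_t *pcifuncs, int count, int base_idx)
{
	int idx, last_idx = base_idx + count - 1;

	for (idx = base_idx; idx <= last_idx; idx++) {
		int next_idx = idx + 1;

		/* EOL is set only on the last MCE in the chain */
		sk_setup_mce(idx, pcifuncs[idx - base_idx], next_idx,
			     next_idx > last_idx);
	}
}

int main(void)
{
	uint16_t list[] = { 0x0400, 0x0401, 0x0402 }; /* a PF and two of its VFs */

	sk_dump_mce_list(list, 3, 256); /* 256: example bcast_mce_idx */
	return 0;
}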
......@@ -1849,8 +2020,8 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
struct nix_txsch *txsch;
int err, lvl, schq;
u64 cfg, reg;
int err, lvl;
/* Get scheduler queue count of each type and alloc
* bitmap for each for alloc/free/attach operations.
......@@ -1888,7 +2059,8 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
sizeof(u32), GFP_KERNEL);
if (!txsch->pfvf_map)
return -ENOMEM;
memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32));
for (schq = 0; schq < txsch->schq.max; schq++)
txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE);
}
return 0;
}
......@@ -2032,51 +2204,82 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
if (field_marker)
memset(&tmp, 0, sizeof(tmp));
field_marker = true;
keyoff_marker = true;
switch (key_type) {
case NIX_FLOW_KEY_TYPE_PORT:
field->sel_chan = true;
/* This should be set to 1, when SEL_CHAN is set */
field->bytesm1 = 1;
field_marker = true;
keyoff_marker = true;
break;
case NIX_FLOW_KEY_TYPE_IPV4:
case NIX_FLOW_KEY_TYPE_INNR_IPV4:
field->lid = NPC_LID_LC;
field->ltype_match = NPC_LT_LC_IP;
if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV4) {
field->lid = NPC_LID_LG;
field->ltype_match = NPC_LT_LG_TU_IP;
}
field->hdr_offset = 12; /* SIP offset */
field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
field->ltype_mask = 0xF; /* Match only IPv4 */
field_marker = true;
keyoff_marker = false;
break;
case NIX_FLOW_KEY_TYPE_IPV6:
case NIX_FLOW_KEY_TYPE_INNR_IPV6:
field->lid = NPC_LID_LC;
field->ltype_match = NPC_LT_LC_IP6;
if (key_type == NIX_FLOW_KEY_TYPE_INNR_IPV6) {
field->lid = NPC_LID_LG;
field->ltype_match = NPC_LT_LG_TU_IP6;
}
field->hdr_offset = 8; /* SIP offset */
field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
field->ltype_mask = 0xF; /* Match only IPv6 */
field_marker = true;
keyoff_marker = true;
break;
case NIX_FLOW_KEY_TYPE_TCP:
case NIX_FLOW_KEY_TYPE_UDP:
case NIX_FLOW_KEY_TYPE_SCTP:
case NIX_FLOW_KEY_TYPE_INNR_TCP:
case NIX_FLOW_KEY_TYPE_INNR_UDP:
case NIX_FLOW_KEY_TYPE_INNR_SCTP:
field->lid = NPC_LID_LD;
if (key_type == NIX_FLOW_KEY_TYPE_INNR_TCP ||
key_type == NIX_FLOW_KEY_TYPE_INNR_UDP ||
key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP)
field->lid = NPC_LID_LH;
field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) {
/* Enum values for NPC_LID_LD and NPC_LID_LG are same,
* so no need to change the ltype_match, just change
* the lid for inner protocols
*/
BUILD_BUG_ON((int)NPC_LT_LD_TCP !=
(int)NPC_LT_LH_TU_TCP);
BUILD_BUG_ON((int)NPC_LT_LD_UDP !=
(int)NPC_LT_LH_TU_UDP);
BUILD_BUG_ON((int)NPC_LT_LD_SCTP !=
(int)NPC_LT_LH_TU_SCTP);
if ((key_type == NIX_FLOW_KEY_TYPE_TCP ||
key_type == NIX_FLOW_KEY_TYPE_INNR_TCP) &&
valid_key) {
field->ltype_match |= NPC_LT_LD_TCP;
group_member = true;
} else if (key_type == NIX_FLOW_KEY_TYPE_UDP &&
} else if ((key_type == NIX_FLOW_KEY_TYPE_UDP ||
key_type == NIX_FLOW_KEY_TYPE_INNR_UDP) &&
valid_key) {
field->ltype_match |= NPC_LT_LD_UDP;
group_member = true;
} else if (key_type == NIX_FLOW_KEY_TYPE_SCTP &&
} else if ((key_type == NIX_FLOW_KEY_TYPE_SCTP ||
key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) &&
valid_key) {
field->ltype_match |= NPC_LT_LD_SCTP;
group_member = true;
}
field->ltype_mask = ~field->ltype_match;
if (key_type == NIX_FLOW_KEY_TYPE_SCTP) {
if (key_type == NIX_FLOW_KEY_TYPE_SCTP ||
key_type == NIX_FLOW_KEY_TYPE_INNR_SCTP) {
/* Handle the case where one of the group members
* is enabled but the final group member is not
*/
......@@ -2084,13 +2287,73 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
valid_key = true;
group_member = false;
}
field_marker = true;
keyoff_marker = true;
} else {
field_marker = false;
keyoff_marker = false;
}
break;
case NIX_FLOW_KEY_TYPE_NVGRE:
field->lid = NPC_LID_LD;
field->hdr_offset = 4; /* VSID offset */
field->bytesm1 = 2;
field->ltype_match = NPC_LT_LD_NVGRE;
field->ltype_mask = 0xF;
break;
case NIX_FLOW_KEY_TYPE_VXLAN:
case NIX_FLOW_KEY_TYPE_GENEVE:
field->lid = NPC_LID_LE;
field->bytesm1 = 2;
field->hdr_offset = 4;
field->ltype_mask = 0xF;
field_marker = false;
keyoff_marker = false;
if (key_type == NIX_FLOW_KEY_TYPE_VXLAN && valid_key) {
field->ltype_match |= NPC_LT_LE_VXLAN;
group_member = true;
}
if (key_type == NIX_FLOW_KEY_TYPE_GENEVE && valid_key) {
field->ltype_match |= NPC_LT_LE_GENEVE;
group_member = true;
}
if (key_type == NIX_FLOW_KEY_TYPE_GENEVE) {
if (group_member) {
field->ltype_mask = ~field->ltype_match;
field_marker = true;
keyoff_marker = true;
valid_key = true;
group_member = false;
}
}
break;
case NIX_FLOW_KEY_TYPE_ETH_DMAC:
case NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC:
field->lid = NPC_LID_LA;
field->ltype_match = NPC_LT_LA_ETHER;
if (key_type == NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC) {
field->lid = NPC_LID_LF;
field->ltype_match = NPC_LT_LF_TU_ETHER;
}
field->hdr_offset = 0;
field->bytesm1 = 5; /* DMAC 6 Byte */
field->ltype_mask = 0xF;
break;
case NIX_FLOW_KEY_TYPE_IPV6_EXT:
field->lid = NPC_LID_LC;
field->hdr_offset = 40; /* IPV6 hdr */
field->bytesm1 = 0; /* 1 Byte ext hdr*/
field->ltype_match = NPC_LT_LC_IP6_EXT;
field->ltype_mask = 0xF;
break;
case NIX_FLOW_KEY_TYPE_GTPU:
field->lid = NPC_LID_LE;
field->hdr_offset = 4;
field->bytesm1 = 3; /* 4 bytes TID*/
field->ltype_match = NPC_LT_LE_GTPU;
field->ltype_mask = 0xF;
break;
}
field->ena = 1;
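For the TCP/UDP/SCTP groups (outer and inner), the algorithm shares one 4-byte port field: every enabled group member ORs its layer type into ltype_match, and the field is finalized with ltype_mask = ~ltype_match when the last member is processed. The sketch below models only that match/mask construction; the layer-type values and the assumed compare rule are illustrative, not taken from the NPC profile:
/* Sketch: how TCP/UDP/SCTP share one flow-key field.
 * Layer-type values and the compare rule below are assumptions used only
 * to illustrate the OR/mask construction.
 */
#include <stdio.h>

#define SK_LT_TCP  0x1
#define SK_LT_UDP  0x2

int main(void)
{
	unsigned int ltype_match = 0, ltype_mask;

	/* each enabled group member ORs its layer type in */
	ltype_match |= SK_LT_TCP;
	ltype_match |= SK_LT_UDP;

	/* finalized once the last group member is seen */
	ltype_mask = ~ltype_match & 0xF;

	/* with mask = ~match, any layer type whose bits are covered by
	 * 'match' satisfies (ltype & mask) == 0, so one field serves the
	 * whole group
	 */
	printf("match=0x%x mask=0x%x TCP-hit=%d UDP-hit=%d\n",
	       ltype_match, ltype_mask,
	       (SK_LT_TCP & ltype_mask) == 0, (SK_LT_UDP & ltype_mask) == 0);
	return 0;
}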
......@@ -2449,8 +2712,6 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
cfg &= ~(0xFFFFFULL << 12);
cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg);
return 0;
}
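The credit update above derives the link's normal TX credits as (LMAC FIFO length - new max frame size) in 16-byte units and places the result at bit 12 of the credit register. A quick arithmetic sketch of that packing, using example FIFO and frame sizes:
/* Sketch: recompute a link's normal TX credits for a new max frame size.
 * The 48 KB FIFO and 1518-byte frame are example inputs only.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t lmac_fifo_len = 48 * 1024; /* bytes */
	uint64_t maxlen = 1518;             /* new max frame size */
	uint64_t cfg = 0;

	uint64_t credits = (lmac_fifo_len - maxlen) / 16; /* 16-byte units */

	cfg &= ~(0xFFFFFULL << 12); /* clear the credit field */
	cfg |= credits << 12;       /* (49152 - 1518) / 16 = 2977 */

	printf("credits=%llu cfg=0x%llx\n",
	       (unsigned long long)credits, (unsigned long long)cfg);
	return 0;
}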
......@@ -2591,9 +2852,6 @@ static void nix_link_config(struct rvu *rvu, int blkaddr)
rvu_write64(rvu, blkaddr,
NIX_AF_TX_LINKX_NORM_CREDIT(link),
tx_credits);
rvu_write64(rvu, blkaddr,
NIX_AF_TX_LINKX_EXPR_CREDIT(link),
tx_credits);
}
}
......@@ -2605,8 +2863,6 @@ static void nix_link_config(struct rvu *rvu, int blkaddr)
tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
rvu_write64(rvu, blkaddr,
NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
rvu_write64(rvu, blkaddr,
NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits);
}
}
......@@ -2674,6 +2930,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
/* Do not bypass NDC cache */
cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
cfg &= ~0x3FFEULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
/* Disable caching of SQB aka SQEs */
cfg |= 0x04ULL;
#endif
rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
/* Result structure can be followed by RQ/SQ/CQ context at
......@@ -2704,13 +2964,25 @@ int rvu_nix_init(struct rvu *rvu)
return 0;
block = &hw->block[blkaddr];
/* As per a HW errata in 9xxx A0 silicon, NIX may corrupt
* internal state when conditional clocks are turned off.
* Hence enable them.
*/
if (is_rvu_9xxx_A0(rvu))
if (is_rvu_96xx_B0(rvu)) {
/* As per a HW errata in 96xx A0/B0 silicon, NIX may corrupt
* internal state when conditional clocks are turned off.
* Hence enable them.
*/
rvu_write64(rvu, blkaddr, NIX_AF_CFG,
rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
/* Set chan/link to backpressure TL3 instead of TL2 */
rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
/* Disable SQ manager's sticky mode operation (set TM6 = 0)
* This sticky mode is known to cause SQ stalls when multiple
* SQs are mapped to the same SMQ and transmit packets at the same time.
*/
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
cfg &= ~BIT_ULL(15);
rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
}
/* Calibrate X2P bus to check if CGX/LBK links are fine */
err = nix_calibrate_x2p(rvu, blkaddr);
......@@ -2763,23 +3035,23 @@ int rvu_nix_init(struct rvu *rvu)
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
(NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
(NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
(NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
(NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
(NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP6 << 4) | 0x0F);
(NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP6 << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
(NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
(NPC_LID_LG << 8) | (NPC_LT_LG_TU_TCP << 4) | 0x0F);
(NPC_LID_LH << 8) | (NPC_LT_LH_TU_TCP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
(NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
(NPC_LID_LG << 8) | (NPC_LT_LG_TU_UDP << 4) | 0x0F);
(NPC_LID_LH << 8) | (NPC_LT_LH_TU_UDP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
(NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
(NPC_LID_LG << 8) | (NPC_LT_LG_TU_SCTP << 4) |
(NPC_LID_LH << 8) | (NPC_LT_LH_TU_SCTP << 4) |
0x0F);
err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
......@@ -2825,7 +3097,7 @@ void rvu_nix_freemem(struct rvu *rvu)
}
}
static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct rvu_hwinfo *hw = rvu->hw;
......@@ -2853,7 +3125,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
return err;
rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
return 0;
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
......@@ -2867,7 +3140,8 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
return err;
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
return 0;
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
......@@ -2883,6 +3157,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
nix_rx_sync(rvu, blkaddr);
nix_txschq_free(rvu, pcifunc);
rvu_cgx_start_stop_io(rvu, pcifunc, false);
if (pfvf->sq_ctx) {
ctx_req.ctype = NIX_AQ_CTYPE_SQ;
err = nix_lf_hwctx_disable(rvu, &ctx_req);
......
......@@ -52,8 +52,8 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return 0;
}
static int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
int rvu_npa_aq_enq_inst(struct rvu *rvu, struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
......@@ -241,12 +241,50 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
return err;
}
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
{
struct npa_aq_enq_req lock_ctx_req;
int err;
if (req->op != NPA_AQ_INSTOP_INIT)
return 0;
memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
lock_ctx_req.ctype = req->ctype;
lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
lock_ctx_req.aura_id = req->aura_id;
err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
if (err)
dev_err(rvu->dev,
"PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
req->hdr.pcifunc,
(req->ctype == NPA_AQ_CTYPE_AURA) ?
"Aura" : "Pool", req->aura_id);
return err;
}
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
int err;
err = rvu_npa_aq_enq_inst(rvu, req, rsp);
if (!err)
err = npa_lf_hwctx_lockdown(rvu, req);
return err;
}
#else
int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
#endif
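With CONFIG_NDC_DIS_DYNAMIC_CACHING enabled, every successful Aura/Pool INIT is followed by a LOCK request for the same context so it stays pinned in NDC. A tiny model of that wrapper pattern; the op names and enqueue helper below are hypothetical stand-ins, not the driver's API:
/* Sketch: when dynamic NDC caching is disabled, follow every successful
 * context INIT with a LOCK request for the same Aura/Pool, mirroring the
 * handler wrapper above. Ops and the enqueue helper are stand-ins.
 */
#include <stdio.h>

enum sk_op { SK_OP_INIT, SK_OP_WRITE, SK_OP_LOCK };

static int sk_aq_enq(enum sk_op op, int aura_id)
{
	printf("AQ op=%d aura/pool=%d\n", (int)op, aura_id);
	return 0;
}

static int sk_aq_enq_locked(enum sk_op op, int aura_id)
{
	int err = sk_aq_enq(op, aura_id);

	if (!err && op == SK_OP_INIT)
		err = sk_aq_enq(SK_OP_LOCK, aura_id); /* pin it in NDC */
	return err;
}

int main(void)
{
	return sk_aq_enq_locked(SK_OP_INIT, 7);
}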
int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
......@@ -289,6 +327,9 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
req->aura_sz == NPA_AURA_SZ_0 || !req->nr_pools)
return NPA_AF_ERR_PARAM;
if (req->way_mask)
req->way_mask &= 0xFFFF;
pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
if (!pfvf->npalf || blkaddr < 0)
......@@ -345,7 +386,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
/* Clear way partition mask and set aura offset to '0' */
cfg &= ~(BIT_ULL(34) - 1);
/* Set aura size & enable caching of contexts */
cfg |= (req->aura_sz << 16) | BIT_ULL(34);
cfg |= (req->aura_sz << 16) | BIT_ULL(34) | req->way_mask;
rvu_write64(rvu, blkaddr, NPA_AF_LFX_AURAS_CFG(npalf), cfg);
/* Configure aura HW context's base */
......@@ -353,7 +395,8 @@ int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
(u64)pfvf->aura_ctx->iova);
/* Enable caching of qints hw context */
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf), BIT_ULL(36));
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_CFG(npalf),
BIT_ULL(36) | req->way_mask << 20);
rvu_write64(rvu, blkaddr, NPA_AF_LFX_QINTS_BASE(npalf),
(u64)pfvf->npa_qints_ctx->iova);
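The way_mask plumbing lets the requester restrict which NDC cache ways each context type may occupy: the mask is clamped to 16 bits and then packed into the config registers at the offsets used in the writes above (bit 0 for auras alongside the aura size, bit 20 for QINTs). A small sketch of that packing with example values:
/* Sketch: clamp and place an NDC way mask into the two config words.
 * Bit offsets follow the register writes above; the mask and aura size
 * values are examples.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t way_mask = 0x00FF; /* example: allow only the lower 8 ways */
	uint64_t aura_sz = 3;       /* example aura size encoding */

	way_mask &= 0xFFFF;         /* the mask is at most 16 bits wide */

	uint64_t auras_cfg = (aura_sz << 16) | (1ULL << 34) | way_mask;
	uint64_t qints_cfg = (1ULL << 36) | (way_mask << 20);

	printf("AURAS_CFG=0x%llx QINTS_CFG=0x%llx\n",
	       (unsigned long long)auras_cfg, (unsigned long long)qints_cfg);
	return 0;
}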
......@@ -422,6 +465,10 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
/* Do not bypass NDC cache */
cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
cfg &= ~0x03DULL;
#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
/* Disable caching of stack pages */
cfg |= 0x10ULL;
#endif
rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
/* Result structure can be followed by Aura/Pool context at
......
......@@ -120,6 +120,31 @@ static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
}
}
static void npc_clear_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index)
{
int bank = npc_get_bank(mcam, index);
int actbank = bank;
index &= (mcam->banksize - 1);
for (; bank < (actbank + mcam->banks_per_entry); bank++) {
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), 0);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), 0);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 1), 0);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_W0(index, bank, 0), 0);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 1), 0);
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), 0);
}
}
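npc_clear_mcam_entry() resolves a global MCAM index into a starting bank plus a row within that bank and then zeroes the CAM words across every bank the entry spans for the configured key width. A sketch of that index arithmetic, assuming a power-of-two bank size and example geometry (the driver's bank selection also folds in the key width, which is omitted here):
/* Sketch: map a global MCAM index to (bank, row) and walk the banks one
 * entry spans. Geometry values are examples; the driver's bank selection
 * additionally accounts for the MCAM key width.
 */
#include <stdio.h>

int main(void)
{
	int banksize = 256;      /* entries per bank (example) */
	int banks_per_entry = 2; /* e.g. a wider key occupies 2 banks */
	int index = 700;         /* global MCAM entry index (example) */

	int bank = index / banksize;       /* starting bank (simplified) */
	int row  = index & (banksize - 1); /* row within the bank */

	for (int b = bank; b < bank + banks_per_entry; b++)
		printf("clear CAM words at bank=%d row=%d\n", b, row);
	return 0;
}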
static void npc_get_keyword(struct mcam_entry *entry, int idx,
u64 *cam0, u64 *cam1)
{
......@@ -211,6 +236,12 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
actindex = index;
index &= (mcam->banksize - 1);
/* Disable before mcam entry update */
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
/* Clear mcam entry to avoid writes being suppressed by NPC */
npc_clear_mcam_entry(rvu, mcam, blkaddr, actindex);
/* CAM1 takes the comparison value and
* CAM0 specifies match for a bit in key being '0' or '1' or 'dontcare'.
* CAM1<n> = 0 & CAM0<n> = 1 => match if key<n> = 0
......@@ -251,8 +282,6 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
/* Enable the entry */
if (enable)
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
else
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
}
static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
......@@ -354,8 +383,8 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
NIX_INTF_RX, &entry, true);
/* add VLAN matching, setup action and save entry back for later */
entry.kw[0] |= (NPC_LT_LB_STAG | NPC_LT_LB_CTAG) << 20;
entry.kw_mask[0] |= (NPC_LT_LB_STAG & NPC_LT_LB_CTAG) << 20;
entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20;
entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20;
entry.vtag_action = VTAG0_VALID_BIT |
FIELD_PREP(VTAG0_TYPE_MASK, 0) |
......@@ -448,68 +477,75 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct mcam_entry entry = { {0} };
struct rvu_hwinfo *hw = rvu->hw;
struct nix_rx_action action;
#ifdef MCAST_MCE
struct rvu_pfvf *pfvf;
#endif
int blkaddr, index;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
/* Only PF can add a bcast match entry */
if (pcifunc & RVU_PFVF_FUNC_MASK)
/* Skip LBK VFs */
if (is_afvf(pcifunc))
return;
#ifdef MCAST_MCE
pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
#endif
/* If pkt replication is not supported,
* then only PF is allowed to add a bcast match entry.
*/
if (!hw->cap.nix_rx_multicast && pcifunc & RVU_PFVF_FUNC_MASK)
return;
/* Get 'pcifunc' of PF device */
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
/* Check for L2B bit and LMAC channel
* NOTE: Since the MKEX default profile is a reduced version (intended to
* accommodate more capability but ignoring a few bits), this is a stop-gap
* approach.
* Since we care about L2B, which per HRM NPC_PARSE_KEX_S sits at BIT_POS[25],
* it is moved to BIT_POS[13], ignoring ERRCODE and ERRLEV, else we'd lose out
* on capability features needed for CoS (from an ODP PoV) e.g. VLAN,
* DSCP.
*
* Reduced layout of MKEX default profile -
* Includes the following (i.e. CHAN, L2/3{B/M}, LA, LB, LC, LD):
*
* BIT_POS[31:28] : LD
* BIT_POS[27:24] : LC
* BIT_POS[23:20] : LB
* BIT_POS[19:16] : LA
* BIT_POS[15:12] : L3B, L3M, L2B, L2M
* BIT_POS[11:00] : CHAN
*
/* Match ingress channel */
entry.kw[0] = chan;
entry.kw_mask[0] = 0xfffull;
/* Match broadcast MAC address.
* DMAC is extracted at 0th bit of PARSE_KEX::KW1
*/
entry.kw[0] = BIT_ULL(13) | chan;
entry.kw_mask[0] = BIT_ULL(13) | 0xFFFULL;
entry.kw[1] = 0xffffffffffffull;
entry.kw_mask[1] = 0xffffffffffffull;
*(u64 *)&action = 0x00;
#ifdef MCAST_MCE
/* Early silicon doesn't support pkt replication,
* so install the entry with a UCAST action so that
* the PF receives all broadcast packets.
*/
action.op = NIX_RX_ACTIONOP_MCAST;
action.pf_func = pcifunc;
action.index = pfvf->bcast_mce_idx;
#else
action.op = NIX_RX_ACTIONOP_UCAST;
action.pf_func = pcifunc;
#endif
if (!hw->cap.nix_rx_multicast) {
/* Early silicon doesn't support pkt replication,
* so install the entry with a UCAST action so that
* the PF receives all broadcast packets.
*/
action.op = NIX_RX_ACTIONOP_UCAST;
action.pf_func = pcifunc;
} else {
pfvf = rvu_get_pfvf(rvu, pcifunc);
action.index = pfvf->bcast_mce_idx;
action.op = NIX_RX_ACTIONOP_MCAST;
}
entry.action = *(u64 *)&action;
npc_config_mcam_entry(rvu, mcam, blkaddr, index,
NIX_INTF_RX, &entry, true);
}
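The updated broadcast entry matches on two things taken from the comments above: the ingress channel in the low 12 bits of KW0 and an all-ones DMAC starting at bit 0 of KW1. A sketch composing those key words; the channel value is just an example:
/* Sketch: compose the broadcast-match MCAM key as in the function above.
 * The channel number is an example; bit positions follow the comments in
 * the code (channel in KW0[11:0], DMAC starting at bit 0 of KW1).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t kw[2] = { 0 }, kw_mask[2] = { 0 };
	uint64_t chan = 0x800; /* example ingress channel */

	kw[0]      = chan;              /* match this channel ...     */
	kw_mask[0] = 0xFFFULL;          /* ... on the 12 channel bits */

	kw[1]      = 0xFFFFFFFFFFFFULL; /* DMAC == ff:ff:ff:ff:ff:ff  */
	kw_mask[1] = 0xFFFFFFFFFFFFULL; /* compare all 48 DMAC bits   */

	printf("kw0=0x%llx/0x%llx kw1=0x%llx/0x%llx\n",
	       (unsigned long long)kw[0], (unsigned long long)kw_mask[0],
	       (unsigned long long)kw[1], (unsigned long long)kw_mask[1]);
	return 0;
}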
void rvu_npc_disable_bcast_entry(struct rvu *rvu, u16 pcifunc)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, index;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
/* Get 'pcifunc' of PF device */
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
}
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index)
{
......@@ -704,8 +740,7 @@ static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
/* Layer B: Stacked VLAN (STAG|QinQ) */
/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG, 0, cfg);
SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_QINQ, 0, cfg);
SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, cfg);
/* Layer C: IPv4 */
/* SIP+DIP: 8 bytes, KW2[63:0] */
......@@ -806,11 +841,11 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
/* Compare with mkex mod_param name string */
if (mcam_kex->mkex_sign == MKEX_SIGN &&
!strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
/* Due to an errata (35786) in A0 pass silicon,
/* Due to an errata (35786) in A0/B0 pass silicon,
* parse nibble enable configuration has to be
* identical for both Rx and Tx interfaces.
*/
if (is_rvu_9xxx_A0(rvu) &&
if (is_rvu_96xx_B0(rvu) &&
mcam_kex->keyx_cfg[NIX_INTF_RX] !=
mcam_kex->keyx_cfg[NIX_INTF_TX])
goto load_default;
......@@ -1064,6 +1099,13 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->hprio_count = mcam->lprio_count;
mcam->hprio_end = mcam->hprio_count;
/* Reserve last counter for MCAM RX miss action which is set to
* drop pkt. This way we will know how many pkts didn't match
* any MCAM entry.
*/
mcam->counters.max--;
mcam->rx_miss_act_cntr = mcam->counters.max;
/* Allocate bitmap for managing MCAM counters and memory
* for saving counter to RVU PFFUNC allocation mapping.
*/
......@@ -1101,6 +1143,7 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
int rvu_npc_init(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
struct npc_mcam *mcam = &rvu->hw->mcam;
u64 keyz = NPC_MCAM_KEY_X2;
int blkaddr, entry, bank, err;
u64 cfg, nibble_ena;
......@@ -1143,7 +1186,7 @@ int rvu_npc_init(struct rvu *rvu)
/* Config Inner IPV4 NPC layer info */
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
(NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
(NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
/* Enable below for Rx pkts.
* - Outer IPv4 header checksum validation.
......@@ -1165,7 +1208,7 @@ int rvu_npc_init(struct rvu *rvu)
/* Due to an errata (35786) in A0 pass silicon, parse nibble enable
* configuration has to be identical for both Rx and Tx interfaces.
*/
if (!is_rvu_9xxx_A0(rvu))
if (!is_rvu_96xx_B0(rvu))
nibble_ena = (1ULL << 19) - 1;
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
((keyz & 0x3) << 32) | nibble_ena);
......@@ -1183,9 +1226,13 @@ int rvu_npc_init(struct rvu *rvu)
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX),
NIX_TX_ACTIONOP_UCAST_DEFAULT);
/* If MCAM lookup doesn't result in a match, drop the received packet */
/* If MCAM lookup doesn't result in a match, drop the received packet.
* Also map this action to a counter so dropped packets are counted.
*/
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX),
NIX_RX_ACTIONOP_DROP);
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(NIX_INTF_RX),
BIT_ULL(9) | mcam->rx_miss_act_cntr);
return 0;
}
......@@ -1200,6 +1247,44 @@ void rvu_npc_freemem(struct rvu *rvu)
mutex_destroy(&mcam->lock);
}
void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int entry;
*alloc_cnt = 0;
*enable_cnt = 0;
for (entry = 0; entry < mcam->bmap_entries; entry++) {
if (mcam->entry2pfvf_map[entry] == pcifunc) {
(*alloc_cnt)++;
if (is_mcam_entry_enabled(rvu, mcam, blkaddr, entry))
(*enable_cnt)++;
}
}
}
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int cntr;
*alloc_cnt = 0;
*enable_cnt = 0;
for (cntr = 0; cntr < mcam->counters.max; cntr++) {
if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
(*alloc_cnt)++;
if (mcam->cntr_refcnt[cntr])
(*enable_cnt)++;
}
}
}
static int npc_mcam_verify_entry(struct npc_mcam *mcam,
u16 pcifunc, int entry)
{
......
......@@ -246,6 +246,7 @@
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
#define NIX_AF_SQM_DBG_CTL_STATUS (0x750)
#define NIX_AF_PSE_CHANNEL_LEVEL (0x800)
#define NIX_AF_PSE_SHAPER_CFG (0x810)
#define NIX_AF_TX_EXPR_CREDIT (0x830)
......@@ -435,7 +436,6 @@
#define CPT_AF_LF_RST (0x44000)
#define CPT_AF_BLK_RST (0x46000)
#define NDC_AF_BLK_RST (0x002F0)
#define NPC_AF_BLK_RST (0x00040)
/* NPC */
......@@ -499,4 +499,30 @@
#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
/* NDC */
#define NDC_AF_CONST (0x00000)
#define NDC_AF_CLK_EN (0x00020)
#define NDC_AF_CTL (0x00030)
#define NDC_AF_BANK_CTL (0x00040)
#define NDC_AF_BANK_CTL_DONE (0x00048)
#define NDC_AF_INTR (0x00058)
#define NDC_AF_INTR_W1S (0x00060)
#define NDC_AF_INTR_ENA_W1S (0x00068)
#define NDC_AF_INTR_ENA_W1C (0x00070)
#define NDC_AF_ACTIVE_PC (0x00078)
#define NDC_AF_BP_TEST_ENABLE (0x001F8)
#define NDC_AF_BP_TEST(a) (0x00200 | (a) << 3)
#define NDC_AF_BLK_RST (0x002F0)
#define NDC_PRIV_AF_INT_CFG (0x002F8)
#define NDC_AF_HASHX(a) (0x00300 | (a) << 3)
#define NDC_AF_PORTX_RTX_RWX_REQ_PC(a, b, c) \
(0x00C00 | (a) << 5 | (b) << 4 | (c) << 3)
#define NDC_AF_PORTX_RTX_RWX_OSTDN_PC(a, b, c) \
(0x00D00 | (a) << 5 | (b) << 4 | (c) << 3)
#define NDC_AF_PORTX_RTX_RWX_LAT_PC(a, b, c) \
(0x00E00 | (a) << 5 | (b) << 4 | (c) << 3)
#define NDC_AF_PORTX_RTX_CANT_ALLOC_PC(a, b) \
(0x00F00 | (a) << 5 | (b) << 4)
#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3)
#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)
#endif /* RVU_REG_H */
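The new per-port NDC performance-counter macros encode their parameters straight into the register offset. The sketch below just expands that arithmetic for one example; the interpretation of a/b/c (port, request type, read/write selector) is inferred from the macro name and should be treated as an assumption:
/* Sketch: expand the NDC_AF_PORTX_RTX_RWX_REQ_PC() offset arithmetic for
 * one example. The meaning of a/b/c is inferred from the macro name and
 * is an assumption here.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_NDC_AF_PORTX_RTX_RWX_REQ_PC(a, b, c) \
	(0x00C00ULL | (uint64_t)(a) << 5 | (uint64_t)(b) << 4 | (uint64_t)(c) << 3)

int main(void)
{
	/* e.g. a = 1, b = 0, c = 1:
	 * 0xC00 | (1 << 5) | (0 << 4) | (1 << 3) = 0xC28
	 */
	printf("offset=0x%llx\n",
	       (unsigned long long)SK_NDC_AF_PORTX_RTX_RWX_REQ_PC(1, 0, 1));
	return 0;
}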
......@@ -13,22 +13,22 @@
/* RVU Block Address Enumeration */
enum rvu_block_addr_e {
BLKADDR_RVUM = 0x0ULL,
BLKADDR_LMT = 0x1ULL,
BLKADDR_MSIX = 0x2ULL,
BLKADDR_NPA = 0x3ULL,
BLKADDR_NIX0 = 0x4ULL,
BLKADDR_NIX1 = 0x5ULL,
BLKADDR_NPC = 0x6ULL,
BLKADDR_SSO = 0x7ULL,
BLKADDR_SSOW = 0x8ULL,
BLKADDR_TIM = 0x9ULL,
BLKADDR_CPT0 = 0xaULL,
BLKADDR_CPT1 = 0xbULL,
BLKADDR_NDC0 = 0xcULL,
BLKADDR_NDC1 = 0xdULL,
BLKADDR_NDC2 = 0xeULL,
BLK_COUNT = 0xfULL,
BLKADDR_RVUM = 0x0ULL,
BLKADDR_LMT = 0x1ULL,
BLKADDR_MSIX = 0x2ULL,
BLKADDR_NPA = 0x3ULL,
BLKADDR_NIX0 = 0x4ULL,
BLKADDR_NIX1 = 0x5ULL,
BLKADDR_NPC = 0x6ULL,
BLKADDR_SSO = 0x7ULL,
BLKADDR_SSOW = 0x8ULL,
BLKADDR_TIM = 0x9ULL,
BLKADDR_CPT0 = 0xaULL,
BLKADDR_CPT1 = 0xbULL,
BLKADDR_NDC_NIX0_RX = 0xcULL,
BLKADDR_NDC_NIX0_TX = 0xdULL,
BLKADDR_NDC_NPA0 = 0xeULL,
BLK_COUNT = 0xfULL,
};
/* RVU Block Type Enumeration */
......@@ -474,9 +474,9 @@ struct nix_cq_ctx_s {
u64 ena : 1;
u64 drop_ena : 1;
u64 drop : 8;
u64 dp : 8;
u64 bp : 8;
#else
u64 dp : 8;
u64 bp : 8;
u64 drop : 8;
u64 drop_ena : 1;
u64 ena : 1;
......