Commit 9c79ddaa authored by Yuval Mintz, committed by David S. Miller

qed*: Add support for QL41xxx adapters

This adds the necessary infrastructure changes for initializing
and working with the new series of QL41xxx adapters.

It also adds 2 new PCI device-IDs to qede:
  - 0x8070 for QL41xxx PFs
  - 0x8090 for VFs spawning from QL41xxx PFs
Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 942c56ad
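As a quick illustration of how the new IDs are told apart from the existing
57980S (BB) family, the qed.h and qed_dev.c hunks below mask the upper byte of
the PCI device ID and compare it against QED_DEV_ID_MASK_BB/QED_DEV_ID_MASK_AH.
The following is a minimal standalone sketch using the mask values from this
diff; it is illustrative only and not the driver code itself:

#include <stdint.h>
#include <stdio.h>

enum chip_type { CHIP_BB, CHIP_AH, CHIP_UNKNOWN };

/* Mask values mirror QED_DEV_ID_MASK{,_BB,_AH} added in the qed.h hunk */
static enum chip_type classify_device_id(uint16_t device_id)
{
	switch (device_id & 0xff00) {
	case 0x1600:		/* 57980S family -> BB */
		return CHIP_BB;
	case 0x8000:		/* QL41xxx family -> AH */
		return CHIP_AH;
	default:
		return CHIP_UNKNOWN;
	}
}

int main(void)
{
	/* 0x8070 (QL41xxx PF) and 0x8090 (QL41xxx VF) both resolve to AH;
	 * 0x1656 (57980S-25) resolves to BB.
	 */
	printf("0x8070 -> %d, 0x8090 -> %d, 0x1656 -> %d\n",
	       classify_device_id(0x8070), classify_device_id(0x8090),
	       classify_device_id(0x1656));
	return 0;
}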
@@ -219,7 +219,9 @@ enum QED_PORT_MODE {
 	QED_PORT_MODE_DE_4X20G,
 	QED_PORT_MODE_DE_1X40G,
 	QED_PORT_MODE_DE_2X25G,
-	QED_PORT_MODE_DE_1X25G
+	QED_PORT_MODE_DE_1X25G,
+	QED_PORT_MODE_DE_4X25G,
+	QED_PORT_MODE_DE_2X10G,
 };

 enum qed_dev_cap {
@@ -364,7 +366,8 @@ struct qed_hwfn {
 #define IS_LEAD_HWFN(edev)	(!((edev)->my_id))
 	u8	rel_pf_id;	/* Relative to engine*/
 	u8	abs_pf_id;
-#define QED_PATH_ID(_p_hwfn)	((_p_hwfn)->abs_pf_id & 1)
+#define QED_PATH_ID(_p_hwfn) \
+	(QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
 	u8	port_id;
 	bool	b_active;
@@ -523,9 +526,7 @@ struct qed_dev {
 	u8	dp_level;
 	char	name[NAME_SIZE];

-	u8	type;
-#define QED_DEV_TYPE_BB	(0 << 0)
-#define QED_DEV_TYPE_AH	BIT(0)
+	enum	qed_dev_type type;
+
 /* Translate type/revision combo into the proper conditions */
 #define QED_IS_BB(dev)		((dev)->type == QED_DEV_TYPE_BB)
 #define QED_IS_BB_A0(dev)	(QED_IS_BB(dev) && \
@@ -540,6 +541,9 @@ struct qed_dev {
 	u16	vendor_id;
 	u16	device_id;
+#define QED_DEV_ID_MASK		0xff00
+#define QED_DEV_ID_MASK_BB	0x1600
+#define QED_DEV_ID_MASK_AH	0x8000

 	u16	chip_num;
 #define CHIP_NUM_MASK	0xffff
@@ -654,10 +658,16 @@ struct qed_dev {
 	u32 rdma_max_srq_sge;
 };

-#define NUM_OF_VFS(dev)		MAX_NUM_VFS_BB
-#define NUM_OF_L2_QUEUES(dev)	MAX_NUM_L2_QUEUES_BB
-#define NUM_OF_SBS(dev)		MAX_SB_PER_PATH_BB
-#define NUM_OF_ENG_PFS(dev)	MAX_NUM_PFS_BB
+#define NUM_OF_VFS(dev)		(QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
+						: MAX_NUM_VFS_K2)
+#define NUM_OF_L2_QUEUES(dev)	(QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
+						: MAX_NUM_L2_QUEUES_K2)
+#define NUM_OF_PORTS(dev)	(QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
+						: MAX_NUM_PORTS_K2)
+#define NUM_OF_SBS(dev)		(QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
+						: MAX_SB_PER_PATH_K2)
+#define NUM_OF_ENG_PFS(dev)	(QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
+						: MAX_NUM_PFS_K2)

 /**
  * @brief qed_concrete_to_sw_fid - get the sw function id from
@@ -694,6 +704,7 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
 void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

 #define QED_LEADING_HWFN(dev)	(&dev->hwfns[0])
+int qed_device_num_engines(struct qed_dev *cdev);

 /* Other Linux specific common definitions */
 #define DP_NAME(cdev) ((cdev)->name)
......
@@ -1557,7 +1557,7 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
 		dev_data->mode_enable[MODE_K2] = 1;
 	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
 		dev_data->chip_id = CHIP_BB_B0;
-		dev_data->mode_enable[MODE_BB_B0] = 1;
+		dev_data->mode_enable[MODE_BB] = 1;
 	} else {
 		return DBG_STATUS_UNKNOWN_CHIP;
 	}
......
@@ -674,11 +674,19 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
 	return rc;
 }

-static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
 {
 	int hw_mode = 0;

-	hw_mode = (1 << MODE_BB_B0);
+	if (QED_IS_BB_B0(p_hwfn->cdev)) {
+		hw_mode |= 1 << MODE_BB;
+	} else if (QED_IS_AH(p_hwfn->cdev)) {
+		hw_mode |= 1 << MODE_K2;
+	} else {
+		DP_NOTICE(p_hwfn, "Unknown chip type %#x\n",
+			  p_hwfn->cdev->type);
+		return -EINVAL;
+	}

 	switch (p_hwfn->cdev->num_ports_in_engines) {
 	case 1:
@@ -693,7 +701,7 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
 	default:
 		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
 			  p_hwfn->cdev->num_ports_in_engines);
-		return;
+		return -EINVAL;
 	}

 	switch (p_hwfn->cdev->mf_mode) {
@@ -719,6 +727,8 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
 	DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
 		   "Configuring function for hw_mode: 0x%08x\n",
 		   p_hwfn->hw_info.hw_mode);
+
+	return 0;
 }
/* Init run time data for all PFs on an engine. */ /* Init run time data for all PFs on an engine. */
@@ -754,10 +764,10 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 	struct qed_qm_common_rt_init_params params;
 	struct qed_dev *cdev = p_hwfn->cdev;
+	u8 vf_id, max_num_vfs;
 	u16 num_pfs, pf_id;
 	u32 concrete_fid;
 	int rc = 0;
-	u8 vf_id;

 	qed_init_cau_rt_data(cdev);
@@ -814,7 +824,8 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
 		qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
 	}

-	for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
+	max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
+	for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
 		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
 		qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
 		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
@@ -1135,7 +1146,9 @@ int qed_hw_init(struct qed_dev *cdev,
 		/* Enable DMAE in PXP */
 		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);

-		qed_calc_hw_mode(p_hwfn);
+		rc = qed_calc_hw_mode(p_hwfn);
+		if (rc)
+			return rc;

 		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
 		if (rc) {
@@ -1485,10 +1498,25 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
 static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
 {
 	/* clear indirect access */
-	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
-	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
-	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
-	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+	if (QED_IS_AH(p_hwfn->cdev)) {
+		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+		       PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
+		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+		       PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
+		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+		       PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
+		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+		       PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
+	} else {
+		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+		       PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
+		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+		       PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
+		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+		       PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
+		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+		       PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
+	}

 	/* Clean Previous errors if such exist */
 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
...@@ -1610,6 +1638,7 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn, ...@@ -1610,6 +1638,7 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
enum qed_resources res_id) enum qed_resources res_id)
{ {
u8 num_funcs = p_hwfn->num_funcs_on_engine; u8 num_funcs = p_hwfn->num_funcs_on_engine;
bool b_ah = QED_IS_AH(p_hwfn->cdev);
struct qed_sb_cnt_info sb_cnt_info; struct qed_sb_cnt_info sb_cnt_info;
u32 dflt_resc_num = 0; u32 dflt_resc_num = 0;
...@@ -1620,17 +1649,22 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn, ...@@ -1620,17 +1649,22 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
dflt_resc_num = sb_cnt_info.sb_cnt; dflt_resc_num = sb_cnt_info.sb_cnt;
break; break;
case QED_L2_QUEUE: case QED_L2_QUEUE:
dflt_resc_num = MAX_NUM_L2_QUEUES_BB / num_funcs; dflt_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2
: MAX_NUM_L2_QUEUES_BB) / num_funcs;
break; break;
case QED_VPORT: case QED_VPORT:
dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs; dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs;
dflt_resc_num = (b_ah ? MAX_NUM_VPORTS_K2
: MAX_NUM_VPORTS_BB) / num_funcs;
break; break;
case QED_RSS_ENG: case QED_RSS_ENG:
dflt_resc_num = ETH_RSS_ENGINE_NUM_BB / num_funcs; dflt_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2
: ETH_RSS_ENGINE_NUM_BB) / num_funcs;
break; break;
case QED_PQ: case QED_PQ:
/* The granularity of the PQs is 8 */ /* The granularity of the PQs is 8 */
dflt_resc_num = MAX_QM_TX_QUEUES_BB / num_funcs; dflt_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2
: MAX_QM_TX_QUEUES_BB) / num_funcs;
dflt_resc_num &= ~0x7; dflt_resc_num &= ~0x7;
break; break;
case QED_RL: case QED_RL:
...@@ -1642,7 +1676,8 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn, ...@@ -1642,7 +1676,8 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break; break;
case QED_ILT: case QED_ILT:
dflt_resc_num = PXP_NUM_ILT_RECORDS_BB / num_funcs; dflt_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2
: PXP_NUM_ILT_RECORDS_BB) / num_funcs;
break; break;
case QED_LL2_QUEUE: case QED_LL2_QUEUE:
dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs; dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
...@@ -1653,7 +1688,10 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn, ...@@ -1653,7 +1688,10 @@ static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs; dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
break; break;
case QED_RDMA_STATS_QUEUE: case QED_RDMA_STATS_QUEUE:
dflt_resc_num = RDMA_NUM_STATISTIC_COUNTERS_BB / num_funcs; dflt_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
: RDMA_NUM_STATISTIC_COUNTERS_BB) /
num_funcs;
break; break;
default: default:
break; break;
@@ -1780,6 +1818,7 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
 static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
 {
+	bool b_ah = QED_IS_AH(p_hwfn->cdev);
 	u8 res_id;
 	int rc;
@@ -1790,7 +1829,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
 	}

 	/* Sanity for ILT */
-	if ((RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB)) {
+	if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
+	    (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
 		DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
 			  RESC_START(p_hwfn, QED_ILT),
 			  RESC_END(p_hwfn, QED_ILT) - 1);
@@ -1860,9 +1900,15 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
 		break;
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
+		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
+		break;
 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
 		break;
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
+		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
+		break;
 	default:
 		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
 		break;
...@@ -1976,8 +2022,9 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -1976,8 +2022,9 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{ {
u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
struct qed_dev *cdev = p_hwfn->cdev;
num_funcs = MAX_NUM_PFS_BB; num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
* in the other bits are selected. * in the other bits are selected.
...@@ -1990,12 +2037,17 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -1990,12 +2037,17 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE); reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
if (reg_function_hide & 0x1) { if (reg_function_hide & 0x1) {
if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) { if (QED_IS_BB(cdev)) {
num_funcs = 0; if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
eng_mask = 0xaaaa; num_funcs = 0;
eng_mask = 0xaaaa;
} else {
num_funcs = 1;
eng_mask = 0x5554;
}
} else { } else {
num_funcs = 1; num_funcs = 1;
eng_mask = 0x5554; eng_mask = 0xfffe;
} }
/* Get the number of the enabled functions on the engine */ /* Get the number of the enabled functions on the engine */
...@@ -2027,24 +2079,12 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) ...@@ -2027,24 +2079,12 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
} }
static int static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
qed_get_hw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
struct qed_ptt *p_ptt,
enum qed_pci_personality personality)
{ {
u32 port_mode; u32 port_mode;
int rc;
/* Since all information is common, only first hwfns should do this */ port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
if (IS_LEAD_HWFN(p_hwfn)) {
rc = qed_iov_hw_info(p_hwfn);
if (rc)
return rc;
}
/* Read the port mode */
port_mode = qed_rd(p_hwfn, p_ptt,
CNIG_REG_NW_PORT_MODE_BB_B0);
if (port_mode < 3) { if (port_mode < 3) {
p_hwfn->cdev->num_ports_in_engines = 1; p_hwfn->cdev->num_ports_in_engines = 1;
...@@ -2057,6 +2097,54 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, ...@@ -2057,6 +2097,54 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
/* Default num_ports_in_engines to something */ /* Default num_ports_in_engines to something */
p_hwfn->cdev->num_ports_in_engines = 1; p_hwfn->cdev->num_ports_in_engines = 1;
} }
}
static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 port;
int i;
p_hwfn->cdev->num_ports_in_engines = 0;
for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
port = qed_rd(p_hwfn, p_ptt,
CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
if (port & 1)
p_hwfn->cdev->num_ports_in_engines++;
}
if (!p_hwfn->cdev->num_ports_in_engines) {
DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");
/* Default num_ports_in_engine to something */
p_hwfn->cdev->num_ports_in_engines = 1;
}
}
static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
if (QED_IS_BB(p_hwfn->cdev))
qed_hw_info_port_num_bb(p_hwfn, p_ptt);
else
qed_hw_info_port_num_ah(p_hwfn, p_ptt);
}
static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_pci_personality personality)
{
int rc;
/* Since all information is common, only first hwfns should do this */
if (IS_LEAD_HWFN(p_hwfn)) {
rc = qed_iov_hw_info(p_hwfn);
if (rc)
return rc;
}
qed_hw_info_port_num(p_hwfn, p_ptt);
qed_hw_get_nvm_info(p_hwfn, p_ptt); qed_hw_get_nvm_info(p_hwfn, p_ptt);
...@@ -2096,19 +2184,33 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, ...@@ -2096,19 +2184,33 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
static int qed_get_dev_info(struct qed_dev *cdev) static int qed_get_dev_info(struct qed_dev *cdev)
{ {
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
u16 device_id_mask;
u32 tmp; u32 tmp;
/* Read Vendor Id / Device Id */ /* Read Vendor Id / Device Id */
pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id); pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id); pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
/* Determine type */
device_id_mask = cdev->device_id & QED_DEV_ID_MASK;
switch (device_id_mask) {
case QED_DEV_ID_MASK_BB:
cdev->type = QED_DEV_TYPE_BB;
break;
case QED_DEV_ID_MASK_AH:
cdev->type = QED_DEV_TYPE_AH;
break;
default:
DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
return -EBUSY;
}
cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_NUM); MISCS_REG_CHIP_NUM);
cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_REV); MISCS_REG_CHIP_REV);
MASK_FIELD(CHIP_REV, cdev->chip_rev); MASK_FIELD(CHIP_REV, cdev->chip_rev);
cdev->type = QED_DEV_TYPE_BB;
/* Learn number of HW-functions */ /* Learn number of HW-functions */
tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt, tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CMT_ENABLED_FOR_PAIR); MISCS_REG_CMT_ENABLED_FOR_PAIR);
...@@ -2128,7 +2230,10 @@ static int qed_get_dev_info(struct qed_dev *cdev) ...@@ -2128,7 +2230,10 @@ static int qed_get_dev_info(struct qed_dev *cdev)
MASK_FIELD(CHIP_METAL, cdev->chip_metal); MASK_FIELD(CHIP_METAL, cdev->chip_metal);
DP_INFO(cdev->hwfns, DP_INFO(cdev->hwfns,
"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
QED_IS_BB(cdev) ? "BB" : "AH",
'A' + cdev->chip_rev,
(int)cdev->chip_metal,
cdev->chip_num, cdev->chip_rev, cdev->chip_num, cdev->chip_rev,
cdev->chip_bond_id, cdev->chip_metal); cdev->chip_bond_id, cdev->chip_metal);
@@ -3364,3 +3469,8 @@ void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	memset(p_hwfn->qm_info.wfq_data, 0,
 	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
 }
+
+int qed_device_num_engines(struct qed_dev *cdev)
+{
+	return QED_IS_BB(cdev) ? 2 : 1;
+}
@@ -2502,7 +2502,7 @@ struct fw_info_location {
 enum init_modes {
 	MODE_RESERVED,
-	MODE_BB_B0,
+	MODE_BB,
 	MODE_K2,
 	MODE_ASIC,
 	MODE_RESERVED2,
...@@ -9431,11 +9431,24 @@ struct eth_stats { ...@@ -9431,11 +9431,24 @@ struct eth_stats {
u64 r511; u64 r511;
u64 r1023; u64 r1023;
u64 r1518; u64 r1518;
u64 r1522;
u64 r2047; union {
u64 r4095; struct {
u64 r9216; u64 r1522;
u64 r16383; u64 r2047;
u64 r4095;
u64 r9216;
u64 r16383;
} bb0;
struct {
u64 unused1;
u64 r1519_to_max;
u64 unused2;
u64 unused3;
u64 unused4;
} ah0;
} u0;
u64 rfcs; u64 rfcs;
u64 rxcf; u64 rxcf;
u64 rxpf; u64 rxpf;
...@@ -9452,14 +9465,36 @@ struct eth_stats { ...@@ -9452,14 +9465,36 @@ struct eth_stats {
u64 t511; u64 t511;
u64 t1023; u64 t1023;
u64 t1518; u64 t1518;
u64 t2047;
u64 t4095; union {
u64 t9216; struct {
u64 t16383; u64 t2047;
u64 t4095;
u64 t9216;
u64 t16383;
} bb1;
struct {
u64 t1519_to_max;
u64 unused6;
u64 unused7;
u64 unused8;
} ah1;
} u1;
u64 txpf; u64 txpf;
u64 txpp; u64 txpp;
u64 tlpiec;
u64 tncl; union {
struct {
u64 tlpiec;
u64 tncl;
} bb2;
struct {
u64 unused9;
u64 unused10;
} ah2;
} u2;
u64 rbyte; u64 rbyte;
u64 rxuca; u64 rxuca;
u64 rxmca; u64 rxmca;
@@ -10263,6 +10298,8 @@ struct nvm_cfg1_glob {
 #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G	0xC
 #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G	0xD
 #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G	0xE
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G	0xF
 	u32 e_lane_cfg1;
 	u32 e_lane_cfg2;
 	u32 f_lane_cfg1;
......
...@@ -1470,13 +1470,20 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, ...@@ -1470,13 +1470,20 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
memset(&pstats, 0, sizeof(pstats)); memset(&pstats, 0, sizeof(pstats));
qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len); qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes); p_stats->common.tx_ucast_bytes +=
p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes); HILO_64_REGPAIR(pstats.sent_ucast_bytes);
p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes); p_stats->common.tx_mcast_bytes +=
p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts); HILO_64_REGPAIR(pstats.sent_mcast_bytes);
p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts); p_stats->common.tx_bcast_bytes +=
p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts); HILO_64_REGPAIR(pstats.sent_bcast_bytes);
p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts); p_stats->common.tx_ucast_pkts +=
HILO_64_REGPAIR(pstats.sent_ucast_pkts);
p_stats->common.tx_mcast_pkts +=
HILO_64_REGPAIR(pstats.sent_mcast_pkts);
p_stats->common.tx_bcast_pkts +=
HILO_64_REGPAIR(pstats.sent_bcast_pkts);
p_stats->common.tx_err_drop_pkts +=
HILO_64_REGPAIR(pstats.error_drop_pkts);
} }
static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
...@@ -1502,10 +1509,10 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, ...@@ -1502,10 +1509,10 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
memset(&tstats, 0, sizeof(tstats)); memset(&tstats, 0, sizeof(tstats));
qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len); qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
p_stats->mftag_filter_discards += p_stats->common.mftag_filter_discards +=
HILO_64_REGPAIR(tstats.mftag_filter_discard); HILO_64_REGPAIR(tstats.mftag_filter_discard);
p_stats->mac_filter_discards += p_stats->common.mac_filter_discards +=
HILO_64_REGPAIR(tstats.eth_mac_filter_discard); HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
} }
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
...@@ -1539,12 +1546,15 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, ...@@ -1539,12 +1546,15 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
memset(&ustats, 0, sizeof(ustats)); memset(&ustats, 0, sizeof(ustats));
qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len); qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes); p_stats->common.rx_ucast_bytes +=
p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes); HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes); p_stats->common.rx_mcast_bytes +=
p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); p_stats->common.rx_bcast_bytes +=
p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
} }
static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
...@@ -1578,23 +1588,26 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, ...@@ -1578,23 +1588,26 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
memset(&mstats, 0, sizeof(mstats)); memset(&mstats, 0, sizeof(mstats));
qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len); qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard); p_stats->common.no_buff_discards +=
p_stats->packet_too_big_discard += HILO_64_REGPAIR(mstats.no_buff_discard);
HILO_64_REGPAIR(mstats.packet_too_big_discard); p_stats->common.packet_too_big_discard +=
p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard); HILO_64_REGPAIR(mstats.packet_too_big_discard);
p_stats->tpa_coalesced_pkts += p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); p_stats->common.tpa_coalesced_pkts +=
p_stats->tpa_coalesced_events += HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
HILO_64_REGPAIR(mstats.tpa_coalesced_events); p_stats->common.tpa_coalesced_events +=
p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num); HILO_64_REGPAIR(mstats.tpa_coalesced_events);
p_stats->tpa_coalesced_bytes += p_stats->common.tpa_aborts_num +=
HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); HILO_64_REGPAIR(mstats.tpa_aborts_num);
p_stats->common.tpa_coalesced_bytes +=
HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
} }
static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
struct qed_eth_stats *p_stats) struct qed_eth_stats *p_stats)
{ {
struct qed_eth_stats_common *p_common = &p_stats->common;
struct port_stats port_stats; struct port_stats port_stats;
int j; int j;
...@@ -1605,54 +1618,75 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, ...@@ -1605,54 +1618,75 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
offsetof(struct public_port, stats), offsetof(struct public_port, stats),
sizeof(port_stats)); sizeof(port_stats));
p_stats->rx_64_byte_packets += port_stats.eth.r64; p_common->rx_64_byte_packets += port_stats.eth.r64;
p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127; p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255; p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511; p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023; p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522; p_common->rx_crc_errors += port_stats.eth.rfcs;
p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047; p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095; p_common->rx_pause_frames += port_stats.eth.rxpf;
p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216; p_common->rx_pfc_frames += port_stats.eth.rxpp;
p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383; p_common->rx_align_errors += port_stats.eth.raln;
p_stats->rx_crc_errors += port_stats.eth.rfcs; p_common->rx_carrier_errors += port_stats.eth.rfcr;
p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf; p_common->rx_oversize_packets += port_stats.eth.rovr;
p_stats->rx_pause_frames += port_stats.eth.rxpf; p_common->rx_jabbers += port_stats.eth.rjbr;
p_stats->rx_pfc_frames += port_stats.eth.rxpp; p_common->rx_undersize_packets += port_stats.eth.rund;
p_stats->rx_align_errors += port_stats.eth.raln; p_common->rx_fragments += port_stats.eth.rfrg;
p_stats->rx_carrier_errors += port_stats.eth.rfcr; p_common->tx_64_byte_packets += port_stats.eth.t64;
p_stats->rx_oversize_packets += port_stats.eth.rovr; p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
p_stats->rx_jabbers += port_stats.eth.rjbr; p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
p_stats->rx_undersize_packets += port_stats.eth.rund; p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
p_stats->rx_fragments += port_stats.eth.rfrg; p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
p_stats->tx_64_byte_packets += port_stats.eth.t64; p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127; p_common->tx_pause_frames += port_stats.eth.txpf;
p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255; p_common->tx_pfc_frames += port_stats.eth.txpp;
p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511; p_common->rx_mac_bytes += port_stats.eth.rbyte;
p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023; p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047; p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095; p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216; p_common->tx_mac_bytes += port_stats.eth.tbyte;
p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383; p_common->tx_mac_uc_packets += port_stats.eth.txuca;
p_stats->tx_pause_frames += port_stats.eth.txpf; p_common->tx_mac_mc_packets += port_stats.eth.txmca;
p_stats->tx_pfc_frames += port_stats.eth.txpp; p_common->tx_mac_bc_packets += port_stats.eth.txbca;
p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec; p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
p_stats->tx_total_collisions += port_stats.eth.tncl;
p_stats->rx_mac_bytes += port_stats.eth.rbyte;
p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
p_stats->tx_mac_bytes += port_stats.eth.tbyte;
p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
for (j = 0; j < 8; j++) { for (j = 0; j < 8; j++) {
p_stats->brb_truncates += port_stats.brb.brb_truncate[j]; p_common->brb_truncates += port_stats.brb.brb_truncate[j];
p_stats->brb_discards += port_stats.brb.brb_discard[j]; p_common->brb_discards += port_stats.brb.brb_discard[j];
}
if (QED_IS_BB(p_hwfn->cdev)) {
struct qed_eth_stats_bb *p_bb = &p_stats->bb;
p_bb->rx_1519_to_1522_byte_packets +=
port_stats.eth.u0.bb0.r1522;
p_bb->rx_1519_to_2047_byte_packets +=
port_stats.eth.u0.bb0.r2047;
p_bb->rx_2048_to_4095_byte_packets +=
port_stats.eth.u0.bb0.r4095;
p_bb->rx_4096_to_9216_byte_packets +=
port_stats.eth.u0.bb0.r9216;
p_bb->rx_9217_to_16383_byte_packets +=
port_stats.eth.u0.bb0.r16383;
p_bb->tx_1519_to_2047_byte_packets +=
port_stats.eth.u1.bb1.t2047;
p_bb->tx_2048_to_4095_byte_packets +=
port_stats.eth.u1.bb1.t4095;
p_bb->tx_4096_to_9216_byte_packets +=
port_stats.eth.u1.bb1.t9216;
p_bb->tx_9217_to_16383_byte_packets +=
port_stats.eth.u1.bb1.t16383;
p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
} else {
struct qed_eth_stats_ah *p_ah = &p_stats->ah;
p_ah->rx_1519_to_max_byte_packets +=
port_stats.eth.u0.ah0.r1519_to_max;
p_ah->tx_1519_to_max_byte_packets =
port_stats.eth.u1.ah1.t1519_to_max;
} }
} }
......
...@@ -238,6 +238,7 @@ int qed_fill_dev_info(struct qed_dev *cdev, ...@@ -238,6 +238,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality == dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
QED_PCI_ETH_ROCE); QED_PCI_ETH_ROCE);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
dev_info->dev_type = cdev->type;
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
if (IS_PF(cdev)) { if (IS_PF(cdev)) {
...@@ -1653,8 +1654,10 @@ void qed_get_protocol_stats(struct qed_dev *cdev, ...@@ -1653,8 +1654,10 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
switch (type) { switch (type) {
case QED_MCP_LAN_STATS: case QED_MCP_LAN_STATS:
qed_get_vport_stats(cdev, &eth_stats); qed_get_vport_stats(cdev, &eth_stats);
stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts; stats->lan_stats.ucast_rx_pkts =
stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts; eth_stats.common.rx_ucast_pkts;
stats->lan_stats.ucast_tx_pkts =
eth_stats.common.tx_ucast_pkts;
stats->lan_stats.fcs_err = -1; stats->lan_stats.fcs_err = -1;
break; break;
case QED_MCP_FCOE_STATS: case QED_MCP_FCOE_STATS:
......
@@ -479,11 +479,10 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
 						 rel_pfid)
 #define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)

-/* TODO - this is only correct as long as only BB is supported, and
- * no port-swapping is implemented; Afterwards we'll need to fix it.
- */
-#define MFW_PORT(_p_hwfn)	((_p_hwfn)->abs_pf_id %	\
-				 ((_p_hwfn)->cdev->num_ports_in_engines * 2))
+#define MFW_PORT(_p_hwfn)	((_p_hwfn)->abs_pf_id %			  \
+				 ((_p_hwfn)->cdev->num_ports_in_engines * \
+				  qed_device_num_engines((_p_hwfn)->cdev)))
+
 struct qed_mcp_info {
 	/* Spinlock used for protecting the access to the MFW mailbox */
 	spinlock_t	lock;
......
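The MFW_PORT() rework in the qed_mcp.h hunk above replaces the hard-coded
factor of 2 (two engines, BB only) with qed_device_num_engines(), which this
commit defines as 2 for BB and 1 for AH. A small sketch of the resulting
arithmetic, using hypothetical PF ids and port counts rather than values read
from hardware:

#include <stdio.h>

/* Same arithmetic as the reworked MFW_PORT() macro, extracted for clarity */
static int mfw_port(int abs_pf_id, int ports_in_engine, int num_engines)
{
	return abs_pf_id % (ports_in_engine * num_engines);
}

int main(void)
{
	/* Hypothetical layouts: BB with 2 ports per engine and 2 engines,
	 * AH with 4 ports on a single engine.
	 */
	printf("BB: PF 5 -> MFW port %d\n", mfw_port(5, 2, 2)); /* 5 % 4 = 1 */
	printf("AH: PF 6 -> MFW port %d\n", mfw_port(6, 4, 1)); /* 6 % 4 = 2 */
	return 0;
}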
@@ -262,12 +262,20 @@ static int qed_ptp_hw_enable(struct qed_dev *cdev)
 	qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);

 	/* Pause free running counter */
-	qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+	if (QED_IS_BB_B0(p_hwfn->cdev))
+		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+	if (QED_IS_AH(p_hwfn->cdev))
+		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);

 	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
 	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
 	/* Resume free running counter */
-	qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+	if (QED_IS_BB_B0(p_hwfn->cdev))
+		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+	if (QED_IS_AH(p_hwfn->cdev)) {
+		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
+		qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
+	}

 	/* Disable drift register */
 	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
......
@@ -160,13 +160,13 @@
 	0x2e0704UL
 #define CCFC_REG_STRONG_ENABLE_PF \
 	0x2e0708UL
-#define PGLUE_B_REG_PGL_ADDR_88_F0 \
+#define PGLUE_B_REG_PGL_ADDR_88_F0_BB \
 	0x2aa404UL
-#define PGLUE_B_REG_PGL_ADDR_8C_F0 \
+#define PGLUE_B_REG_PGL_ADDR_8C_F0_BB \
 	0x2aa408UL
-#define PGLUE_B_REG_PGL_ADDR_90_F0 \
+#define PGLUE_B_REG_PGL_ADDR_90_F0_BB \
 	0x2aa40cUL
-#define PGLUE_B_REG_PGL_ADDR_94_F0 \
+#define PGLUE_B_REG_PGL_ADDR_94_F0_BB \
 	0x2aa410UL
 #define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
 	0x2aa138UL
@@ -1550,4 +1550,13 @@
 #define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL
 #define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL
 #define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL
+#define NIG_REG_PTP_LATCH_OSTS_PKT_TIME 0x509040UL
+#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2 0x2aaf98UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2 0x2aafa0UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2 0x2aafa4UL
+#define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL
+#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
 #endif
@@ -557,14 +557,30 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
 		return 0;
 	}

-	/* Calculate the first VF index - this is a bit tricky; Basically,
-	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
-	 * after the first engine's VFs.
+	/* First VF index based on offset is tricky:
+	 *  - If ARI is supported [likely], offset - (16 - pf_id) would
+	 *    provide the number for eng0. 2nd engine VFs would begin
+	 *    after the first engine's VFs.
+	 *  - If !ARI, VFs would start on next device.
+	 *    so offset - (256 - pf_id) would provide the number.
+	 * Utilize the fact that (256 - pf_id) is achieved only by the latter
+	 * to differentiate between the two.
 	 */
-	cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
-					   p_hwfn->abs_pf_id - 16;
-	if (QED_PATH_ID(p_hwfn))
-		cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+	if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
+		u32 first = p_hwfn->cdev->p_iov_info->offset +
+			    p_hwfn->abs_pf_id - 16;
+
+		cdev->p_iov_info->first_vf_in_pf = first;
+
+		if (QED_PATH_ID(p_hwfn))
+			cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+	} else {
+		u32 first = p_hwfn->cdev->p_iov_info->offset +
+			    p_hwfn->abs_pf_id - 256;
+
+		cdev->p_iov_info->first_vf_in_pf = first;
+	}

 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
 		   "First VF in hwfn 0x%08x\n",
......
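To make the ARI/non-ARI split in the qed_sriov.c hunk above concrete, here is
a small standalone sketch of the same arithmetic with hypothetical offset and
PF values; MAX_NUM_VFS_BB below is a placeholder constant for illustration,
not a value taken from the HSI headers:

#include <stdio.h>

#define MAX_NUM_VFS_BB 120	/* placeholder value for this sketch */

static unsigned int first_vf_in_pf(unsigned int offset, unsigned int abs_pf_id,
				   int path_id)
{
	unsigned int first;

	if (offset < (256 - abs_pf_id)) {
		/* ARI: VFs start 16 functions above PF0 */
		first = offset + abs_pf_id - 16;
		if (path_id)	/* second-engine BB PFs */
			first -= MAX_NUM_VFS_BB;
	} else {
		/* !ARI: VFs start on the next device */
		first = offset + abs_pf_id - 256;
	}
	return first;
}

int main(void)
{
	/* Hypothetical inputs: PF 2 with offset 16 (ARI) or 256 (!ARI);
	 * both land on VF index 2.
	 */
	printf("ARI   : %u\n", first_vf_in_pf(16, 2, 0));
	printf("no ARI: %u\n", first_vf_in_pf(256, 2, 0));
	return 0;
}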
...@@ -58,7 +58,7 @@ ...@@ -58,7 +58,7 @@
#define DRV_MODULE_SYM qede #define DRV_MODULE_SYM qede
struct qede_stats { struct qede_stats_common {
u64 no_buff_discards; u64 no_buff_discards;
u64 packet_too_big_discard; u64 packet_too_big_discard;
u64 ttl0_discard; u64 ttl0_discard;
...@@ -90,11 +90,6 @@ struct qede_stats { ...@@ -90,11 +90,6 @@ struct qede_stats {
u64 rx_256_to_511_byte_packets; u64 rx_256_to_511_byte_packets;
u64 rx_512_to_1023_byte_packets; u64 rx_512_to_1023_byte_packets;
u64 rx_1024_to_1518_byte_packets; u64 rx_1024_to_1518_byte_packets;
u64 rx_1519_to_1522_byte_packets;
u64 rx_1519_to_2047_byte_packets;
u64 rx_2048_to_4095_byte_packets;
u64 rx_4096_to_9216_byte_packets;
u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors; u64 rx_crc_errors;
u64 rx_mac_crtl_frames; u64 rx_mac_crtl_frames;
u64 rx_pause_frames; u64 rx_pause_frames;
...@@ -111,17 +106,39 @@ struct qede_stats { ...@@ -111,17 +106,39 @@ struct qede_stats {
u64 tx_256_to_511_byte_packets; u64 tx_256_to_511_byte_packets;
u64 tx_512_to_1023_byte_packets; u64 tx_512_to_1023_byte_packets;
u64 tx_1024_to_1518_byte_packets; u64 tx_1024_to_1518_byte_packets;
u64 tx_pause_frames;
u64 tx_pfc_frames;
u64 brb_truncates;
u64 brb_discards;
u64 tx_mac_ctrl_frames;
};
struct qede_stats_bb {
u64 rx_1519_to_1522_byte_packets;
u64 rx_1519_to_2047_byte_packets;
u64 rx_2048_to_4095_byte_packets;
u64 rx_4096_to_9216_byte_packets;
u64 rx_9217_to_16383_byte_packets;
u64 tx_1519_to_2047_byte_packets; u64 tx_1519_to_2047_byte_packets;
u64 tx_2048_to_4095_byte_packets; u64 tx_2048_to_4095_byte_packets;
u64 tx_4096_to_9216_byte_packets; u64 tx_4096_to_9216_byte_packets;
u64 tx_9217_to_16383_byte_packets; u64 tx_9217_to_16383_byte_packets;
u64 tx_pause_frames;
u64 tx_pfc_frames;
u64 tx_lpi_entry_count; u64 tx_lpi_entry_count;
u64 tx_total_collisions; u64 tx_total_collisions;
u64 brb_truncates; };
u64 brb_discards;
u64 tx_mac_ctrl_frames; struct qede_stats_ah {
u64 rx_1519_to_max_byte_packets;
u64 tx_1519_to_max_byte_packets;
};
struct qede_stats {
struct qede_stats_common common;
union {
struct qede_stats_bb bb;
struct qede_stats_ah ah;
};
}; };
struct qede_vlan { struct qede_vlan {
...@@ -158,6 +175,10 @@ struct qede_dev { ...@@ -158,6 +175,10 @@ struct qede_dev {
struct qed_dev_eth_info dev_info; struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues) #define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues) #define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
#define QEDE_IS_BB(edev) \
((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
#define QEDE_IS_AH(edev) \
((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)
struct qede_fastpath *fp_array; struct qede_fastpath *fp_array;
u8 req_num_tx; u8 req_num_tx;
......
...@@ -75,16 +75,33 @@ static const struct { ...@@ -75,16 +75,33 @@ static const struct {
QEDE_TQSTAT(stopped_cnt), QEDE_TQSTAT(stopped_cnt),
}; };
#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name)) #define QEDE_STAT_OFFSET(stat_name, type, base) \
#define QEDE_STAT_STRING(stat_name) (#stat_name) (offsetof(type, stat_name) + (base))
#define _QEDE_STAT(stat_name, pf_only) \ #define QEDE_STAT_STRING(stat_name) (#stat_name)
{QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only} #define _QEDE_STAT(stat_name, type, base, attr) \
#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true) {QEDE_STAT_OFFSET(stat_name, type, base), \
#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false) QEDE_STAT_STRING(stat_name), \
attr}
#define QEDE_STAT(stat_name) \
_QEDE_STAT(stat_name, struct qede_stats_common, 0, 0x0)
#define QEDE_PF_STAT(stat_name) \
_QEDE_STAT(stat_name, struct qede_stats_common, 0, \
BIT(QEDE_STAT_PF_ONLY))
#define QEDE_PF_BB_STAT(stat_name) \
_QEDE_STAT(stat_name, struct qede_stats_bb, \
offsetof(struct qede_stats, bb), \
BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY))
#define QEDE_PF_AH_STAT(stat_name) \
_QEDE_STAT(stat_name, struct qede_stats_ah, \
offsetof(struct qede_stats, ah), \
BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_AH_ONLY))
static const struct { static const struct {
u64 offset; u64 offset;
char string[ETH_GSTRING_LEN]; char string[ETH_GSTRING_LEN];
bool pf_only; unsigned long attr;
#define QEDE_STAT_PF_ONLY 0
#define QEDE_STAT_BB_ONLY 1
#define QEDE_STAT_AH_ONLY 2
} qede_stats_arr[] = { } qede_stats_arr[] = {
QEDE_STAT(rx_ucast_bytes), QEDE_STAT(rx_ucast_bytes),
QEDE_STAT(rx_mcast_bytes), QEDE_STAT(rx_mcast_bytes),
...@@ -106,22 +123,23 @@ static const struct { ...@@ -106,22 +123,23 @@ static const struct {
QEDE_PF_STAT(rx_256_to_511_byte_packets), QEDE_PF_STAT(rx_256_to_511_byte_packets),
QEDE_PF_STAT(rx_512_to_1023_byte_packets), QEDE_PF_STAT(rx_512_to_1023_byte_packets),
QEDE_PF_STAT(rx_1024_to_1518_byte_packets), QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
QEDE_PF_STAT(rx_1519_to_1522_byte_packets), QEDE_PF_BB_STAT(rx_1519_to_1522_byte_packets),
QEDE_PF_STAT(rx_1519_to_2047_byte_packets), QEDE_PF_BB_STAT(rx_1519_to_2047_byte_packets),
QEDE_PF_STAT(rx_2048_to_4095_byte_packets), QEDE_PF_BB_STAT(rx_2048_to_4095_byte_packets),
QEDE_PF_STAT(rx_4096_to_9216_byte_packets), QEDE_PF_BB_STAT(rx_4096_to_9216_byte_packets),
QEDE_PF_STAT(rx_9217_to_16383_byte_packets), QEDE_PF_BB_STAT(rx_9217_to_16383_byte_packets),
QEDE_PF_AH_STAT(rx_1519_to_max_byte_packets),
QEDE_PF_STAT(tx_64_byte_packets), QEDE_PF_STAT(tx_64_byte_packets),
QEDE_PF_STAT(tx_65_to_127_byte_packets), QEDE_PF_STAT(tx_65_to_127_byte_packets),
QEDE_PF_STAT(tx_128_to_255_byte_packets), QEDE_PF_STAT(tx_128_to_255_byte_packets),
QEDE_PF_STAT(tx_256_to_511_byte_packets), QEDE_PF_STAT(tx_256_to_511_byte_packets),
QEDE_PF_STAT(tx_512_to_1023_byte_packets), QEDE_PF_STAT(tx_512_to_1023_byte_packets),
QEDE_PF_STAT(tx_1024_to_1518_byte_packets), QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
QEDE_PF_STAT(tx_1519_to_2047_byte_packets), QEDE_PF_BB_STAT(tx_1519_to_2047_byte_packets),
QEDE_PF_STAT(tx_2048_to_4095_byte_packets), QEDE_PF_BB_STAT(tx_2048_to_4095_byte_packets),
QEDE_PF_STAT(tx_4096_to_9216_byte_packets), QEDE_PF_BB_STAT(tx_4096_to_9216_byte_packets),
QEDE_PF_STAT(tx_9217_to_16383_byte_packets), QEDE_PF_BB_STAT(tx_9217_to_16383_byte_packets),
QEDE_PF_AH_STAT(tx_1519_to_max_byte_packets),
QEDE_PF_STAT(rx_mac_crtl_frames), QEDE_PF_STAT(rx_mac_crtl_frames),
QEDE_PF_STAT(tx_mac_ctrl_frames), QEDE_PF_STAT(tx_mac_ctrl_frames),
QEDE_PF_STAT(rx_pause_frames), QEDE_PF_STAT(rx_pause_frames),
...@@ -136,8 +154,8 @@ static const struct { ...@@ -136,8 +154,8 @@ static const struct {
QEDE_PF_STAT(rx_jabbers), QEDE_PF_STAT(rx_jabbers),
QEDE_PF_STAT(rx_undersize_packets), QEDE_PF_STAT(rx_undersize_packets),
QEDE_PF_STAT(rx_fragments), QEDE_PF_STAT(rx_fragments),
QEDE_PF_STAT(tx_lpi_entry_count), QEDE_PF_BB_STAT(tx_lpi_entry_count),
QEDE_PF_STAT(tx_total_collisions), QEDE_PF_BB_STAT(tx_total_collisions),
QEDE_PF_STAT(brb_truncates), QEDE_PF_STAT(brb_truncates),
QEDE_PF_STAT(brb_discards), QEDE_PF_STAT(brb_discards),
QEDE_STAT(no_buff_discards), QEDE_STAT(no_buff_discards),
...@@ -155,6 +173,12 @@ static const struct { ...@@ -155,6 +173,12 @@ static const struct {
}; };
#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr) #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
#define QEDE_STAT_IS_PF_ONLY(i) \
test_bit(QEDE_STAT_PF_ONLY, &qede_stats_arr[i].attr)
#define QEDE_STAT_IS_BB_ONLY(i) \
test_bit(QEDE_STAT_BB_ONLY, &qede_stats_arr[i].attr)
#define QEDE_STAT_IS_AH_ONLY(i) \
test_bit(QEDE_STAT_AH_ONLY, &qede_stats_arr[i].attr)
enum { enum {
QEDE_PRI_FLAG_CMT, QEDE_PRI_FLAG_CMT,
...@@ -213,6 +237,13 @@ static void qede_get_strings_stats_rxq(struct qede_dev *edev, ...@@ -213,6 +237,13 @@ static void qede_get_strings_stats_rxq(struct qede_dev *edev,
} }
} }
static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index)
{
return (IS_VF(edev) && QEDE_STAT_IS_PF_ONLY(stat_index)) ||
(QEDE_IS_BB(edev) && QEDE_STAT_IS_AH_ONLY(stat_index)) ||
(QEDE_IS_AH(edev) && QEDE_STAT_IS_BB_ONLY(stat_index));
}
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{ {
struct qede_fastpath *fp; struct qede_fastpath *fp;
...@@ -234,7 +265,7 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) ...@@ -234,7 +265,7 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
/* Account for non-queue statistics */ /* Account for non-queue statistics */
for (i = 0; i < QEDE_NUM_STATS; i++) { for (i = 0; i < QEDE_NUM_STATS; i++) {
if (IS_VF(edev) && qede_stats_arr[i].pf_only) if (qede_is_irrelevant_stat(edev, i))
continue; continue;
strcpy(buf, qede_stats_arr[i].string); strcpy(buf, qede_stats_arr[i].string);
buf += ETH_GSTRING_LEN; buf += ETH_GSTRING_LEN;
...@@ -309,7 +340,7 @@ static void qede_get_ethtool_stats(struct net_device *dev, ...@@ -309,7 +340,7 @@ static void qede_get_ethtool_stats(struct net_device *dev,
} }
for (i = 0; i < QEDE_NUM_STATS; i++) { for (i = 0; i < QEDE_NUM_STATS; i++) {
if (IS_VF(edev) && qede_stats_arr[i].pf_only) if (qede_is_irrelevant_stat(edev, i))
continue; continue;
*buf = *((u64 *)(((void *)&edev->stats) + *buf = *((u64 *)(((void *)&edev->stats) +
qede_stats_arr[i].offset)); qede_stats_arr[i].offset));
...@@ -323,17 +354,13 @@ static void qede_get_ethtool_stats(struct net_device *dev, ...@@ -323,17 +354,13 @@ static void qede_get_ethtool_stats(struct net_device *dev,
static int qede_get_sset_count(struct net_device *dev, int stringset) static int qede_get_sset_count(struct net_device *dev, int stringset)
{ {
struct qede_dev *edev = netdev_priv(dev); struct qede_dev *edev = netdev_priv(dev);
int num_stats = QEDE_NUM_STATS; int num_stats = QEDE_NUM_STATS, i;
switch (stringset) { switch (stringset) {
case ETH_SS_STATS: case ETH_SS_STATS:
if (IS_VF(edev)) { for (i = 0; i < QEDE_NUM_STATS; i++)
int i; if (qede_is_irrelevant_stat(edev, i))
num_stats--;
for (i = 0; i < QEDE_NUM_STATS; i++)
if (qede_stats_arr[i].pf_only)
num_stats--;
}
/* Account for the Regular Tx statistics */ /* Account for the Regular Tx statistics */
num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS; num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
......
@@ -84,6 +84,8 @@ static const struct qed_eth_ops *qed_ops;
 #define CHIP_NUM_57980S_50		0x1654
 #define CHIP_NUM_57980S_25		0x1656
 #define CHIP_NUM_57980S_IOV		0x1664
+#define CHIP_NUM_AH			0x8070
+#define CHIP_NUM_AH_IOV			0x8090

 #ifndef PCI_DEVICE_ID_NX2_57980E
 #define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
@@ -93,6 +95,9 @@ static const struct qed_eth_ops *qed_ops;
 #define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
 #define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
 #define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
+#define PCI_DEVICE_ID_AH		CHIP_NUM_AH
+#define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV
+
 #endif

 enum qede_pci_private {
@@ -109,6 +114,10 @@ static const struct pci_device_id qede_pci_tbl[] = {
 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
 #ifdef CONFIG_QED_SRIOV
 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
+	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
 #endif
 	{ 0 }
 };
...@@ -314,122 +323,135 @@ static int qede_close(struct net_device *ndev); ...@@ -314,122 +323,135 @@ static int qede_close(struct net_device *ndev);
void qede_fill_by_demand_stats(struct qede_dev *edev) void qede_fill_by_demand_stats(struct qede_dev *edev)
{ {
struct qede_stats_common *p_common = &edev->stats.common;
struct qed_eth_stats stats; struct qed_eth_stats stats;
edev->ops->get_vport_stats(edev->cdev, &stats); edev->ops->get_vport_stats(edev->cdev, &stats);
edev->stats.no_buff_discards = stats.no_buff_discards;
edev->stats.packet_too_big_discard = stats.packet_too_big_discard; p_common->no_buff_discards = stats.common.no_buff_discards;
edev->stats.ttl0_discard = stats.ttl0_discard; p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes; p_common->ttl0_discard = stats.common.ttl0_discard;
-	edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
-	edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
-	edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
-	edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
-	edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
-	edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
-	edev->stats.mac_filter_discards = stats.mac_filter_discards;
-
-	edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
-	edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
-	edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
-	edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
-	edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
-	edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
-	edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
-	edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
-	edev->stats.coalesced_events = stats.tpa_coalesced_events;
-	edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
-	edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
-	edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
-
-	edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
-	edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
-	edev->stats.rx_128_to_255_byte_packets =
-				stats.rx_128_to_255_byte_packets;
-	edev->stats.rx_256_to_511_byte_packets =
-				stats.rx_256_to_511_byte_packets;
-	edev->stats.rx_512_to_1023_byte_packets =
-				stats.rx_512_to_1023_byte_packets;
-	edev->stats.rx_1024_to_1518_byte_packets =
-				stats.rx_1024_to_1518_byte_packets;
-	edev->stats.rx_1519_to_1522_byte_packets =
-				stats.rx_1519_to_1522_byte_packets;
-	edev->stats.rx_1519_to_2047_byte_packets =
-				stats.rx_1519_to_2047_byte_packets;
-	edev->stats.rx_2048_to_4095_byte_packets =
-				stats.rx_2048_to_4095_byte_packets;
-	edev->stats.rx_4096_to_9216_byte_packets =
-				stats.rx_4096_to_9216_byte_packets;
-	edev->stats.rx_9217_to_16383_byte_packets =
-				stats.rx_9217_to_16383_byte_packets;
-	edev->stats.rx_crc_errors = stats.rx_crc_errors;
-	edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
-	edev->stats.rx_pause_frames = stats.rx_pause_frames;
-	edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
-	edev->stats.rx_align_errors = stats.rx_align_errors;
-	edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
-	edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
-	edev->stats.rx_jabbers = stats.rx_jabbers;
-	edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
-	edev->stats.rx_fragments = stats.rx_fragments;
-	edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
-	edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
-	edev->stats.tx_128_to_255_byte_packets =
-				stats.tx_128_to_255_byte_packets;
-	edev->stats.tx_256_to_511_byte_packets =
-				stats.tx_256_to_511_byte_packets;
-	edev->stats.tx_512_to_1023_byte_packets =
-				stats.tx_512_to_1023_byte_packets;
-	edev->stats.tx_1024_to_1518_byte_packets =
-				stats.tx_1024_to_1518_byte_packets;
-	edev->stats.tx_1519_to_2047_byte_packets =
-				stats.tx_1519_to_2047_byte_packets;
-	edev->stats.tx_2048_to_4095_byte_packets =
-				stats.tx_2048_to_4095_byte_packets;
-	edev->stats.tx_4096_to_9216_byte_packets =
-				stats.tx_4096_to_9216_byte_packets;
-	edev->stats.tx_9217_to_16383_byte_packets =
-				stats.tx_9217_to_16383_byte_packets;
-	edev->stats.tx_pause_frames = stats.tx_pause_frames;
-	edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
-	edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
-	edev->stats.tx_total_collisions = stats.tx_total_collisions;
-	edev->stats.brb_truncates = stats.brb_truncates;
-	edev->stats.brb_discards = stats.brb_discards;
-	edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
+	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
+	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
+	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
+	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
+	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
+	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
+	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
+	p_common->mac_filter_discards = stats.common.mac_filter_discards;
+
+	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
+	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
+	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
+	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
+	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
+	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
+	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
+	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
+	p_common->coalesced_events = stats.common.tpa_coalesced_events;
+	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
+	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
+	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
+
+	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
+	p_common->rx_65_to_127_byte_packets =
+	    stats.common.rx_65_to_127_byte_packets;
+	p_common->rx_128_to_255_byte_packets =
+	    stats.common.rx_128_to_255_byte_packets;
+	p_common->rx_256_to_511_byte_packets =
+	    stats.common.rx_256_to_511_byte_packets;
+	p_common->rx_512_to_1023_byte_packets =
+	    stats.common.rx_512_to_1023_byte_packets;
+	p_common->rx_1024_to_1518_byte_packets =
+	    stats.common.rx_1024_to_1518_byte_packets;
+	p_common->rx_crc_errors = stats.common.rx_crc_errors;
+	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
+	p_common->rx_pause_frames = stats.common.rx_pause_frames;
+	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
+	p_common->rx_align_errors = stats.common.rx_align_errors;
+	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
+	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
+	p_common->rx_jabbers = stats.common.rx_jabbers;
+	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
+	p_common->rx_fragments = stats.common.rx_fragments;
+	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
+	p_common->tx_65_to_127_byte_packets =
+	    stats.common.tx_65_to_127_byte_packets;
+	p_common->tx_128_to_255_byte_packets =
+	    stats.common.tx_128_to_255_byte_packets;
+	p_common->tx_256_to_511_byte_packets =
+	    stats.common.tx_256_to_511_byte_packets;
+	p_common->tx_512_to_1023_byte_packets =
+	    stats.common.tx_512_to_1023_byte_packets;
+	p_common->tx_1024_to_1518_byte_packets =
+	    stats.common.tx_1024_to_1518_byte_packets;
+	p_common->tx_pause_frames = stats.common.tx_pause_frames;
+	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
+	p_common->brb_truncates = stats.common.brb_truncates;
+	p_common->brb_discards = stats.common.brb_discards;
+	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
+
+	if (QEDE_IS_BB(edev)) {
+		struct qede_stats_bb *p_bb = &edev->stats.bb;
+
+		p_bb->rx_1519_to_1522_byte_packets =
+		    stats.bb.rx_1519_to_1522_byte_packets;
+		p_bb->rx_1519_to_2047_byte_packets =
+		    stats.bb.rx_1519_to_2047_byte_packets;
+		p_bb->rx_2048_to_4095_byte_packets =
+		    stats.bb.rx_2048_to_4095_byte_packets;
+		p_bb->rx_4096_to_9216_byte_packets =
+		    stats.bb.rx_4096_to_9216_byte_packets;
+		p_bb->rx_9217_to_16383_byte_packets =
+		    stats.bb.rx_9217_to_16383_byte_packets;
+		p_bb->tx_1519_to_2047_byte_packets =
+		    stats.bb.tx_1519_to_2047_byte_packets;
+		p_bb->tx_2048_to_4095_byte_packets =
+		    stats.bb.tx_2048_to_4095_byte_packets;
+		p_bb->tx_4096_to_9216_byte_packets =
+		    stats.bb.tx_4096_to_9216_byte_packets;
+		p_bb->tx_9217_to_16383_byte_packets =
+		    stats.bb.tx_9217_to_16383_byte_packets;
+		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
+		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
+	} else {
+		struct qede_stats_ah *p_ah = &edev->stats.ah;
+
+		p_ah->rx_1519_to_max_byte_packets =
+		    stats.ah.rx_1519_to_max_byte_packets;
+		p_ah->tx_1519_to_max_byte_packets =
+		    stats.ah.tx_1519_to_max_byte_packets;
+	}
 }
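The QEDE_IS_BB() helper used above is defined outside the hunks shown on this page. A minimal sketch of what such a helper presumably looks like, keying off the dev_type that qed now reports at probe time (an assumption for illustration, not a quote of the actual qede.h change):

/* Assumed sketch only: tell the existing BB family apart from the new
 * AH (QL41xxx) family via the dev_type exported in the qed device info. */
#define QEDE_IS_BB(edev) \
	((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
#define QEDE_IS_AH(edev) \
	((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)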
 static void qede_get_stats64(struct net_device *dev,
 			     struct rtnl_link_stats64 *stats)
 {
 	struct qede_dev *edev = netdev_priv(dev);
+	struct qede_stats_common *p_common;
 
 	qede_fill_by_demand_stats(edev);
+	p_common = &edev->stats.common;
 
-	stats->rx_packets = edev->stats.rx_ucast_pkts +
-			    edev->stats.rx_mcast_pkts +
-			    edev->stats.rx_bcast_pkts;
-	stats->tx_packets = edev->stats.tx_ucast_pkts +
-			    edev->stats.tx_mcast_pkts +
-			    edev->stats.tx_bcast_pkts;
-
-	stats->rx_bytes = edev->stats.rx_ucast_bytes +
-			  edev->stats.rx_mcast_bytes +
-			  edev->stats.rx_bcast_bytes;
-
-	stats->tx_bytes = edev->stats.tx_ucast_bytes +
-			  edev->stats.tx_mcast_bytes +
-			  edev->stats.tx_bcast_bytes;
-
-	stats->tx_errors = edev->stats.tx_err_drop_pkts;
-	stats->multicast = edev->stats.rx_mcast_pkts +
-			   edev->stats.rx_bcast_pkts;
-
-	stats->rx_fifo_errors = edev->stats.no_buff_discards;
-	stats->collisions = edev->stats.tx_total_collisions;
-	stats->rx_crc_errors = edev->stats.rx_crc_errors;
-	stats->rx_frame_errors = edev->stats.rx_align_errors;
+	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+			    p_common->rx_bcast_pkts;
+	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+			    p_common->tx_bcast_pkts;
+
+	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
+			  p_common->rx_bcast_bytes;
+	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
+			  p_common->tx_bcast_bytes;
+
+	stats->tx_errors = p_common->tx_err_drop_pkts;
+	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
+
+	stats->rx_fifo_errors = p_common->no_buff_discards;
+
+	if (QEDE_IS_BB(edev))
+		stats->collisions = edev->stats.bb.tx_total_collisions;
+	stats->rx_crc_errors = p_common->rx_crc_errors;
+	stats->rx_frame_errors = p_common->rx_align_errors;
 }
 
 #ifdef CONFIG_QED_SRIOV
...
@@ -300,6 +300,11 @@ struct qed_sb_info {
 	struct qed_dev *cdev;
 };
 
+enum qed_dev_type {
+	QED_DEV_TYPE_BB,
+	QED_DEV_TYPE_AH,
+};
+
 struct qed_dev_info {
 	unsigned long pci_mem_start;
 	unsigned long pci_mem_end;
@@ -325,6 +330,8 @@ struct qed_dev_info {
 	u16 mtu;
 
 	bool wol_support;
+
+	enum qed_dev_type dev_type;
 };
 
 enum qed_sb_type {
@@ -752,7 +759,7 @@ enum qed_mf_mode {
 	QED_MF_NPAR,
 };
 
-struct qed_eth_stats {
+struct qed_eth_stats_common {
 	u64 no_buff_discards;
 	u64 packet_too_big_discard;
 	u64 ttl0_discard;
@@ -784,11 +791,6 @@ struct qed_eth_stats {
 	u64 rx_256_to_511_byte_packets;
 	u64 rx_512_to_1023_byte_packets;
 	u64 rx_1024_to_1518_byte_packets;
-	u64 rx_1519_to_1522_byte_packets;
-	u64 rx_1519_to_2047_byte_packets;
-	u64 rx_2048_to_4095_byte_packets;
-	u64 rx_4096_to_9216_byte_packets;
-	u64 rx_9217_to_16383_byte_packets;
 	u64 rx_crc_errors;
 	u64 rx_mac_crtl_frames;
 	u64 rx_pause_frames;
@@ -805,14 +807,8 @@
 	u64 tx_256_to_511_byte_packets;
 	u64 tx_512_to_1023_byte_packets;
 	u64 tx_1024_to_1518_byte_packets;
-	u64 tx_1519_to_2047_byte_packets;
-	u64 tx_2048_to_4095_byte_packets;
-	u64 tx_4096_to_9216_byte_packets;
-	u64 tx_9217_to_16383_byte_packets;
 	u64 tx_pause_frames;
 	u64 tx_pfc_frames;
-	u64 tx_lpi_entry_count;
-	u64 tx_total_collisions;
 	u64 brb_truncates;
 	u64 brb_discards;
 	u64 rx_mac_bytes;
@@ -827,6 +823,34 @@
 	u64 tx_mac_ctrl_frames;
 };
 
+struct qed_eth_stats_bb {
+	u64 rx_1519_to_1522_byte_packets;
+	u64 rx_1519_to_2047_byte_packets;
+	u64 rx_2048_to_4095_byte_packets;
+	u64 rx_4096_to_9216_byte_packets;
+	u64 rx_9217_to_16383_byte_packets;
+	u64 tx_1519_to_2047_byte_packets;
+	u64 tx_2048_to_4095_byte_packets;
+	u64 tx_4096_to_9216_byte_packets;
+	u64 tx_9217_to_16383_byte_packets;
+	u64 tx_lpi_entry_count;
+	u64 tx_total_collisions;
+};
+
+struct qed_eth_stats_ah {
+	u64 rx_1519_to_max_byte_packets;
+	u64 tx_1519_to_max_byte_packets;
+};
+
+struct qed_eth_stats {
+	struct qed_eth_stats_common common;
+
+	union {
+		struct qed_eth_stats_bb bb;
+		struct qed_eth_stats_ah ah;
+	};
+};
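With the statistics split into a common block plus a per-chip union, a consumer reads the shared counters from .common and then picks .bb or .ah for the chip-specific ones. A minimal sketch of that pattern (the function name is invented for illustration and is not part of this commit):

/* Illustrative only: return the largest RX frame-size counter that the
 * given chip family exposes, from an already-filled qed_eth_stats. */
static u64 example_rx_jumbo_packets(const struct qed_eth_stats *stats,
				    bool is_bb)
{
	if (is_bb)
		return stats->bb.rx_9217_to_16383_byte_packets;

	return stats->ah.rx_1519_to_max_byte_packets;
}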
 
 #define QED_SB_IDX 0x0002
 
 #define RX_PI 0
...
@@ -52,7 +52,8 @@
 #define RDMA_MAX_PDS (64 * 1024)
 
 #define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
 #define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
 
 #define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
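The new _K2 define mirrors the existing _BB one so that RDMA code can size its statistic counters per chip. One plausible selection helper, hypothetical and not part of this commit, following the QED_IS_BB() pattern used elsewhere in qed:

/* Illustrative only: pick the per-chip bound on RDMA statistic counters. */
static inline u32 example_rdma_num_stat_counters(struct qed_dev *cdev)
{
	return QED_IS_BB(cdev) ? RDMA_NUM_STATISTIC_COUNTERS_BB
			       : RDMA_NUM_STATISTIC_COUNTERS_K2;
}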
...