Commit 5be9963d authored by David S. Miller

Merge branch 'hns3-stats-refactor'

Jie Wang says:

====================
net: hns3: refactor rss/tqp stats functions

Currently, the hns3 PF and VF modules each carry their own set of RSS and TQP
stats APIs providing the get and set functions. Most of these APIs are
identical, so there is no need to keep two copies of the same functions and
double the development and bugfix work.

This series refactors the RSS and TQP stats APIs in the hns3 PF and VF
modules by implementing one common set of APIs for both to reuse and deleting
the old APIs.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c5bcdd82 43710bfe
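The caller-visible effect of the refactor: the shared command API drops its
PF/VF flag, so both modules call the same entry point. A minimal before/after
sketch, using only signatures that appear in the diff below:

	/* before: common helpers needed an is_pf flag at every call site */
	ret = hclge_comm_cmd_send(hw, &desc, 1, true);

	/* after: one entry point serves both PF and VF */
	ret = hclge_comm_cmd_send(hw, &desc, 1);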
@@ -18,11 +18,12 @@ hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o \
hns3_common/hclge_comm_cmd.o
hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o \
hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \
hns3_common/hclge_comm_cmd.o
hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o
@@ -61,7 +61,7 @@ static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
}
void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
enum hclge_comm_opcode_type opcode,
enum hclge_opcode_type opcode,
bool is_read)
{
memset((void *)desc, 0, sizeof(struct hclge_desc));
@@ -73,15 +73,14 @@ void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
}
int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev, bool is_pf,
int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw, bool en)
{
struct hclge_comm_firmware_compat_cmd *req;
struct hclge_desc desc;
u32 compat = 0;
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_COMM_OPC_IMP_COMPAT_CFG,
false);
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
if (en) {
req = (struct hclge_comm_firmware_compat_cmd *)desc.data;
@@ -96,7 +95,7 @@ int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev, bool is_pf,
req->compat = cpu_to_le32(compat);
}
return hclge_comm_cmd_send(hw, &desc, 1, is_pf);
return hclge_comm_cmd_send(hw, &desc, 1);
}
void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring)
@@ -205,11 +204,11 @@ int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
struct hclge_desc desc;
int ret;
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_COMM_OPC_QUERY_FW_VER, 1);
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
resp = (struct hclge_comm_query_version_cmd *)desc.data;
resp->api_caps = hclge_comm_build_api_caps();
ret = hclge_comm_cmd_send(hw, &desc, 1, is_pf);
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret)
return ret;
@@ -227,46 +226,32 @@ int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
return ret;
}
static bool hclge_is_elem_in_array(const u16 *spec_opcode, u32 size, u16 opcode)
static const u16 spec_opcode[] = { HCLGE_OPC_STATS_64_BIT,
HCLGE_OPC_STATS_32_BIT,
HCLGE_OPC_STATS_MAC,
HCLGE_OPC_STATS_MAC_ALL,
HCLGE_OPC_QUERY_32_BIT_REG,
HCLGE_OPC_QUERY_64_BIT_REG,
HCLGE_QUERY_CLEAR_MPF_RAS_INT,
HCLGE_QUERY_CLEAR_PF_RAS_INT,
HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
HCLGE_QUERY_ALL_ERR_INFO };
static bool hclge_comm_is_special_opcode(u16 opcode)
{
/* these commands have several descriptors,
* and use the first one to save opcode and return value
*/
u32 i;
for (i = 0; i < size; i++) {
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
if (spec_opcode[i] == opcode)
return true;
}
return false;
}
static const u16 pf_spec_opcode[] = { HCLGE_COMM_OPC_STATS_64_BIT,
HCLGE_COMM_OPC_STATS_32_BIT,
HCLGE_COMM_OPC_STATS_MAC,
HCLGE_COMM_OPC_STATS_MAC_ALL,
HCLGE_COMM_OPC_QUERY_32_BIT_REG,
HCLGE_COMM_OPC_QUERY_64_BIT_REG,
HCLGE_COMM_QUERY_CLEAR_MPF_RAS_INT,
HCLGE_COMM_QUERY_CLEAR_PF_RAS_INT,
HCLGE_COMM_QUERY_CLEAR_ALL_MPF_MSIX_INT,
HCLGE_COMM_QUERY_CLEAR_ALL_PF_MSIX_INT,
HCLGE_COMM_QUERY_ALL_ERR_INFO };
static const u16 vf_spec_opcode[] = { HCLGE_COMM_OPC_STATS_64_BIT,
HCLGE_COMM_OPC_STATS_32_BIT,
HCLGE_COMM_OPC_STATS_MAC };
static bool hclge_comm_is_special_opcode(u16 opcode, bool is_pf)
{
/* these commands have several descriptors,
* and use the first one to save opcode and return value
*/
const u16 *spec_opcode = is_pf ? pf_spec_opcode : vf_spec_opcode;
u32 size = is_pf ? ARRAY_SIZE(pf_spec_opcode) :
ARRAY_SIZE(vf_spec_opcode);
return hclge_is_elem_in_array(spec_opcode, size, opcode);
}
static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring)
{
int ntc = ring->next_to_clean;
@@ -378,7 +363,7 @@ static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
struct hclge_desc *desc, int num,
int ntc, bool is_pf)
int ntc)
{
u16 opcode, desc_ret;
int handle;
@@ -390,7 +375,7 @@ static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
if (ntc >= hw->cmq.csq.desc_num)
ntc = 0;
}
if (likely(!hclge_comm_is_special_opcode(opcode, is_pf)))
if (likely(!hclge_comm_is_special_opcode(opcode)))
desc_ret = le16_to_cpu(desc[num - 1].retval);
else
desc_ret = le16_to_cpu(desc[0].retval);
@@ -402,7 +387,7 @@ static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
struct hclge_desc *desc,
int num, int ntc, bool is_pf)
int num, int ntc)
{
bool is_completed = false;
int handle, ret;
@@ -416,7 +401,7 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
if (!is_completed)
ret = -EBADE;
else
ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc, is_pf);
ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc);
/* Clean the command send queue */
handle = hclge_comm_cmd_csq_clean(hw);
@@ -433,13 +418,12 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
* @hw: pointer to the hw struct
* @desc: prefilled descriptor for describing the command
* @num : the number of descriptors to be sent
* @is_pf: bool to judge pf/vf module
*
* This is the main send command for command queue, it
* sends the queue, cleans the queue, etc
**/
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
int num, bool is_pf)
int num)
{
struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
int ret;
@@ -474,7 +458,7 @@ int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
hw->cmq.csq.next_to_use);
ret = hclge_comm_cmd_check_result(hw, desc, num, ntc, is_pf);
ret = hclge_comm_cmd_check_result(hw, desc, num, ntc);
spin_unlock_bh(&hw->cmq.csq.lock);
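As the comment block above describes, a caller prefills descriptors and
hclge_comm_cmd_send() posts, polls and cleans the queue. A hedged usage
sketch assembled from functions in this patch (error handling shortened):

	struct hclge_desc desc;
	int ret;

	/* single-descriptor read command, e.g. firmware version query */
	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
	ret = hclge_comm_cmd_send(hw, &desc, 1);
	if (ret)
		return ret;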
@@ -495,12 +479,12 @@ static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
}
void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev, bool is_pf,
void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw)
{
struct hclge_comm_cmq *cmdq = &hw->cmq;
hclge_comm_firmware_compat_config(ae_dev, is_pf, hw, false);
hclge_comm_firmware_compat_config(ae_dev, hw, false);
set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
/* wait to ensure that the firmware completes the possible left
@@ -612,7 +596,7 @@ int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
/* ask the firmware to enable some features, driver can work without
* it.
*/
ret = hclge_comm_firmware_compat_config(ae_dev, is_pf, hw, true);
ret = hclge_comm_firmware_compat_config(ae_dev, hw, true);
if (ret)
dev_warn(&ae_dev->pdev->dev,
"Firmware compatible features not enabled(%d).\n",
@@ -55,6 +55,256 @@
#define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024
#define HCLGE_COMM_CMDQ_TX_TIMEOUT 30000
enum hclge_opcode_type {
/* Generic commands */
HCLGE_OPC_QUERY_FW_VER = 0x0001,
HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
HCLGE_OPC_GBL_RST_STATUS = 0x0021,
HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022,
HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
HCLGE_OPC_GET_CFG_PARAM = 0x0025,
HCLGE_OPC_PF_RST_DONE = 0x0026,
HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
HCLGE_OPC_STATS_64_BIT = 0x0030,
HCLGE_OPC_STATS_32_BIT = 0x0031,
HCLGE_OPC_STATS_MAC = 0x0032,
HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033,
HCLGE_OPC_STATS_MAC_ALL = 0x0034,
HCLGE_OPC_QUERY_REG_NUM = 0x0040,
HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
HCLGE_OPC_DFX_BD_NUM = 0x0043,
HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044,
HCLGE_OPC_DFX_SSU_REG_0 = 0x0045,
HCLGE_OPC_DFX_SSU_REG_1 = 0x0046,
HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047,
HCLGE_OPC_DFX_RPU_REG_0 = 0x0048,
HCLGE_OPC_DFX_RPU_REG_1 = 0x0049,
HCLGE_OPC_DFX_NCSI_REG = 0x004A,
HCLGE_OPC_DFX_RTC_REG = 0x004B,
HCLGE_OPC_DFX_PPP_REG = 0x004C,
HCLGE_OPC_DFX_RCB_REG = 0x004D,
HCLGE_OPC_DFX_TQP_REG = 0x004E,
HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
/* MAC command */
HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
HCLGE_OPC_QUERY_ROH_TYPE_INFO = 0x0389,
/* PTP commands */
HCLGE_OPC_PTP_INT_EN = 0x0501,
HCLGE_OPC_PTP_MODE_CFG = 0x0507,
/* PFC/Pause commands */
HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
HCLGE_OPC_CFG_MAC_PARA = 0x0703,
HCLGE_OPC_CFG_PFC_PARA = 0x0704,
HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705,
HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706,
HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707,
HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708,
HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709,
HCLGE_OPC_QOS_MAP = 0x070A,
/* ETS/scheduler commands */
HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804,
HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805,
HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806,
HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807,
HCLGE_OPC_TM_PORT_WEIGHT = 0x0808,
HCLGE_OPC_TM_PG_WEIGHT = 0x0809,
HCLGE_OPC_TM_QS_WEIGHT = 0x080A,
HCLGE_OPC_TM_PRI_WEIGHT = 0x080B,
HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C,
HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D,
HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E,
HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F,
HCLGE_OPC_TM_PORT_SHAPPING = 0x0810,
HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812,
HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
HCLGE_OPC_TM_NODES = 0x0816,
HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
HCLGE_OPC_QSET_DFX_STS = 0x0844,
HCLGE_OPC_PRI_DFX_STS = 0x0845,
HCLGE_OPC_PG_DFX_STS = 0x0846,
HCLGE_OPC_PORT_DFX_STS = 0x0847,
HCLGE_OPC_SCH_NQ_CNT = 0x0848,
HCLGE_OPC_SCH_RQ_CNT = 0x0849,
HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
/* Packet buffer allocate commands */
HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904,
HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
/* TQP management command */
HCLGE_OPC_SET_TQP_MAP = 0x0A01,
/* TQP commands */
HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
/* PPU commands */
HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
/* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS commands */
HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
HCLGE_OPC_RSS_TC_MODE = 0x0D08,
HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02,
/* Promiscuous mode command */
HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
/* Vlan offload commands */
HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
/* Interrupts commands */
HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
/* MAC commands */
HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
/* MAC VLAN commands */
HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
/* VLAN commands */
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
HCLGE_OPC_PORT_VLAN_BYPASS = 0x1103,
/* Flow Director commands */
HCLGE_OPC_FD_MODE_CTRL = 0x1200,
HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
HCLGE_OPC_FD_TCAM_OP = 0x1203,
HCLGE_OPC_FD_AD_OP = 0x1204,
HCLGE_OPC_FD_CNT_OP = 0x1205,
HCLGE_OPC_FD_USER_DEF_OP = 0x1207,
HCLGE_OPC_FD_QB_CTRL = 0x1210,
HCLGE_OPC_FD_QB_AD_OP = 0x1211,
/* MDIO command */
HCLGE_OPC_MDIO_CONFIG = 0x1900,
/* QCN commands */
HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03,
HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
/* Mailbox command */
HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
/* Led command */
HCLGE_OPC_LED_STATUS_CFG = 0xB000,
/* clear hardware resource command */
HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B,
/* NCL config command */
HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
/* IMP stats command */
HCLGE_OPC_IMP_STATS_BD = 0x7012,
HCLGE_OPC_IMP_STATS_INFO = 0x7013,
HCLGE_OPC_IMP_COMPAT_CFG = 0x701A,
/* SFP command */
HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
HCLGE_OPC_GET_SFP_EXIST = 0x7101,
HCLGE_OPC_GET_SFP_INFO = 0x7104,
/* Error INT commands */
HCLGE_MAC_COMMON_INT_EN = 0x030E,
HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
HCLGE_SSU_ECC_INT_CMD = 0x0989,
HCLGE_SSU_COMMON_INT_CMD = 0x098C,
HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40,
HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41,
HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42,
HCLGE_COMMON_ECC_INT_CFG = 0x1505,
HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510,
HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512,
HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
HCLGE_QUERY_ALL_ERR_BD_NUM = 0x1516,
HCLGE_QUERY_ALL_ERR_INFO = 0x1517,
HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
HCLGE_IGU_COMMON_INT_EN = 0x1806,
HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
HCLGE_PPP_CMD0_INT_CMD = 0x2100,
HCLGE_PPP_CMD1_INT_CMD = 0x2101,
HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
HCLGE_NCSI_INT_EN = 0x2401,
/* ROH MAC commands */
HCLGE_OPC_MAC_ADDR_CHECK = 0x9004,
/* PHY command */
HCLGE_OPC_PHY_LINK_KSETTING = 0x7025,
HCLGE_OPC_PHY_REG = 0x7026,
/* Query link diagnosis info command */
HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A,
};
enum hclge_comm_cmd_return_status {
HCLGE_COMM_CMD_EXEC_SUCCESS = 0,
HCLGE_COMM_CMD_NO_AUTH = 1,
@@ -70,20 +320,6 @@ enum hclge_comm_cmd_return_status {
HCLGE_COMM_CMD_INVALID = 11,
};
enum hclge_comm_special_cmd {
HCLGE_COMM_OPC_STATS_64_BIT = 0x0030,
HCLGE_COMM_OPC_STATS_32_BIT = 0x0031,
HCLGE_COMM_OPC_STATS_MAC = 0x0032,
HCLGE_COMM_OPC_STATS_MAC_ALL = 0x0034,
HCLGE_COMM_OPC_QUERY_32_BIT_REG = 0x0041,
HCLGE_COMM_OPC_QUERY_64_BIT_REG = 0x0042,
HCLGE_COMM_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
HCLGE_COMM_QUERY_CLEAR_PF_RAS_INT = 0x1512,
HCLGE_COMM_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
HCLGE_COMM_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
HCLGE_COMM_QUERY_ALL_ERR_INFO = 0x1517,
};
enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_UDP_GSO_B,
HCLGE_COMM_CAP_QB_B,
@@ -108,11 +344,6 @@ enum HCLGE_COMM_API_CAP_BITS {
HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B,
};
enum hclge_comm_opcode_type {
HCLGE_COMM_OPC_QUERY_FW_VER = 0x0001,
HCLGE_COMM_OPC_IMP_COMPAT_CFG = 0x701A,
};
/* capabilities bits map between imp firmware and local driver */
struct hclge_comm_caps_bit_map {
u16 imp_bit;
@@ -209,15 +440,15 @@ int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
u32 *fw_version, bool is_pf);
int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type);
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
int num, bool is_pf);
int num);
void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev, bool is_pf,
int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw, bool en);
void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring);
void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
enum hclge_comm_opcode_type opcode,
enum hclge_opcode_type opcode,
bool is_read);
void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev, bool is_pf,
void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw);
int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw);
int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021-2021 Hisilicon Limited.
#include <linux/skbuff.h>
#include "hnae3.h"
#include "hclge_comm_cmd.h"
#include "hclge_comm_rss.h"
static const u8 hclge_comm_hash_key[] = {
0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
static void
hclge_comm_init_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_tuple_cfg *rss_tuple_cfg)
{
rss_tuple_cfg->ipv4_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
rss_tuple_cfg->ipv4_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
rss_tuple_cfg->ipv4_sctp_en = HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
rss_tuple_cfg->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
rss_tuple_cfg->ipv6_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
rss_tuple_cfg->ipv6_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
rss_tuple_cfg->ipv6_sctp_en =
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT :
HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
rss_tuple_cfg->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
}
int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_cfg *rss_cfg)
{
u16 rss_ind_tbl_size = ae_dev->dev_specs.rss_ind_tbl_size;
int rss_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
u16 *rss_ind_tbl;
if (nic->flags & HNAE3_SUPPORT_VF)
rss_cfg->rss_size = nic->kinfo.rss_size;
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
rss_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;
hclge_comm_init_rss_tuple(ae_dev, &rss_cfg->rss_tuple_sets);
rss_cfg->rss_algo = rss_algo;
rss_ind_tbl = devm_kcalloc(&ae_dev->pdev->dev, rss_ind_tbl_size,
sizeof(*rss_ind_tbl), GFP_KERNEL);
if (!rss_ind_tbl)
return -ENOMEM;
rss_cfg->rss_indirection_tbl = rss_ind_tbl;
memcpy(rss_cfg->rss_hash_key, hclge_comm_hash_key,
HCLGE_COMM_RSS_KEY_SIZE);
hclge_comm_rss_indir_init_cfg(ae_dev, rss_cfg);
return 0;
}
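Both PF and VF are expected to call this helper once at init time; a sketch
of the intended call (surrounding code hypothetical, names from this patch):

	/* during PF/VF init: allocates and seeds the shadow RSS state */
	ret = hclge_comm_rss_init_cfg(nic, ae_dev, rss_cfg);
	if (ret)
		return ret;
	/* rss_cfg->rss_indirection_tbl is now devm-allocated and filled
	 * round-robin by hclge_comm_rss_indir_init_cfg()
	 */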
void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
u16 *tc_valid, u16 *tc_size)
{
u16 roundup_size;
u32 i;
roundup_size = roundup_pow_of_two(rss_size);
roundup_size = ilog2(roundup_size);
for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
tc_valid[i] = 1;
tc_size[i] = roundup_size;
tc_offset[i] = (hw_tc_map & BIT(i)) ? rss_size * i : 0;
}
}
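A small worked example makes the rounding above concrete (values chosen for
illustration):

	u16 tc_offset[HCLGE_COMM_MAX_TC_NUM], tc_valid[HCLGE_COMM_MAX_TC_NUM];
	u16 tc_size[HCLGE_COMM_MAX_TC_NUM];

	/* rss_size = 6, TCs 0 and 1 set in hw_tc_map */
	hclge_comm_get_rss_tc_info(6, 0x3, tc_offset, tc_valid, tc_size);
	/* roundup_pow_of_two(6) = 8 and ilog2(8) = 3, so tc_size[i] == 3;
	 * tc_offset[0] == 0, tc_offset[1] == 6, unmapped TCs stay at 0
	 */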
int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
u16 *tc_valid, u16 *tc_size)
{
struct hclge_comm_rss_tc_mode_cmd *req;
struct hclge_desc desc;
unsigned int i;
int ret;
req = (struct hclge_comm_rss_tc_mode_cmd *)desc.data;
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
u16 mode = 0;
hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_VALID_B,
(tc_valid[i] & 0x1));
hnae3_set_field(mode, HCLGE_COMM_RSS_TC_SIZE_M,
HCLGE_COMM_RSS_TC_SIZE_S, tc_size[i]);
hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_SIZE_MSB_B,
tc_size[i] >> HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET &
0x1);
hnae3_set_field(mode, HCLGE_COMM_RSS_TC_OFFSET_M,
HCLGE_COMM_RSS_TC_OFFSET_S, tc_offset[i]);
req->rss_tc_mode[i] = cpu_to_le16(mode);
}
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret)
dev_err(&hw->cmq.csq.pdev->dev,
"failed to set rss tc mode, ret = %d.\n", ret);
return ret;
}
int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
struct hclge_comm_hw *hw, const u8 *key,
const u8 hfunc)
{
u8 hash_algo;
int ret;
ret = hclge_comm_parse_rss_hfunc(rss_cfg, hfunc, &hash_algo);
if (ret)
return ret;
/* Set the RSS Hash Key if specified by the user */
if (key) {
ret = hclge_comm_set_rss_algo_key(hw, hash_algo, key);
if (ret)
return ret;
/* Update the shadow RSS key with the user specified key */
memcpy(rss_cfg->rss_hash_key, key, HCLGE_COMM_RSS_KEY_SIZE);
} else {
ret = hclge_comm_set_rss_algo_key(hw, hash_algo,
rss_cfg->rss_hash_key);
if (ret)
return ret;
}
rss_cfg->rss_algo = hash_algo;
return 0;
}
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg,
struct ethtool_rxnfc *nfc)
{
struct hclge_comm_rss_input_tuple_cmd *req;
struct hclge_desc desc;
int ret;
if (nfc->data &
~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
return -EINVAL;
req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE,
false);
ret = hclge_comm_init_rss_tuple_cmd(rss_cfg, nfc, ae_dev, req);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
"failed to init rss tuple cmd, ret = %d.\n", ret);
return ret;
}
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
"failed to set rss tuple, ret = %d.\n", ret);
return ret;
}
rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
return 0;
}
u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle)
{
return HCLGE_COMM_RSS_KEY_SIZE;
}
void hclge_comm_get_rss_type(struct hnae3_handle *nic,
struct hclge_comm_rss_tuple_cfg *rss_tuple_sets)
{
if (rss_tuple_sets->ipv4_tcp_en ||
rss_tuple_sets->ipv4_udp_en ||
rss_tuple_sets->ipv4_sctp_en ||
rss_tuple_sets->ipv6_tcp_en ||
rss_tuple_sets->ipv6_udp_en ||
rss_tuple_sets->ipv6_sctp_en)
nic->kinfo.rss_type = PKT_HASH_TYPE_L4;
else if (rss_tuple_sets->ipv4_fragment_en ||
rss_tuple_sets->ipv6_fragment_en)
nic->kinfo.rss_type = PKT_HASH_TYPE_L3;
else
nic->kinfo.rss_type = PKT_HASH_TYPE_NONE;
}
int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
const u8 hfunc, u8 *hash_algo)
{
switch (hfunc) {
case ETH_RSS_HASH_TOP:
*hash_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
return 0;
case ETH_RSS_HASH_XOR:
*hash_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;
return 0;
case ETH_RSS_HASH_NO_CHANGE:
*hash_algo = rss_cfg->rss_algo;
return 0;
default:
return -EINVAL;
}
}
void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_cfg *rss_cfg)
{
u16 i;
/* Initialize RSS indirect table */
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
}
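For example, with rss_size = 4 and a 512-entry table, the fill above is plain
modular arithmetic:

	/* rss_indirection_tbl = { 0, 1, 2, 3, 0, 1, 2, 3, ... } */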
int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
u8 *tuple_sets)
{
switch (flow_type) {
case TCP_V4_FLOW:
*tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
break;
case UDP_V4_FLOW:
*tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
break;
case TCP_V6_FLOW:
*tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
break;
case UDP_V6_FLOW:
*tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
break;
case SCTP_V4_FLOW:
*tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
break;
case SCTP_V6_FLOW:
*tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
break;
case IPV4_FLOW:
case IPV6_FLOW:
*tuple_sets = HCLGE_COMM_S_IP_BIT | HCLGE_COMM_D_IP_BIT;
break;
default:
return -EINVAL;
}
return 0;
}
static void
hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req,
u16 qid, u32 j)
{
u8 rss_msb_oft;
u8 rss_msb_val;
rss_msb_oft =
j * HCLGE_COMM_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
rss_msb_val = (qid >> HCLGE_COMM_RSS_CFG_TBL_BW_L & 0x1) <<
(j * HCLGE_COMM_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
}
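The MSB packing above is easiest to follow with numbers (illustrative values;
HCLGE_COMM_RSS_CFG_TBL_BW_H = 2, HCLGE_COMM_RSS_CFG_TBL_BW_L = 8):

	/* entry j = 5 with queue id qid = 0x155 (bit 8 set):
	 * rss_msb_oft = 5 * 2 / 8 = 1            -> second byte of rss_qid_h
	 * rss_msb_val = ((0x155 >> 8) & 1) << (5 * 2 % 8) = 1 << 2 = 0x04
	 * so rss_qid_h[1] |= 0x04 while rss_qid_l[5] holds 0x55
	 */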
int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw, const u16 *indir)
{
struct hclge_comm_rss_ind_tbl_cmd *req;
struct hclge_desc desc;
u16 rss_cfg_tbl_num;
int ret;
u16 qid;
u16 i;
u32 j;
req = (struct hclge_comm_rss_ind_tbl_cmd *)desc.data;
rss_cfg_tbl_num = ae_dev->dev_specs.rss_ind_tbl_size /
HCLGE_COMM_RSS_CFG_TBL_SIZE;
for (i = 0; i < rss_cfg_tbl_num; i++) {
hclge_comm_cmd_setup_basic_desc(&desc,
HCLGE_OPC_RSS_INDIR_TABLE,
false);
req->start_table_index =
cpu_to_le16(i * HCLGE_COMM_RSS_CFG_TBL_SIZE);
req->rss_set_bitmap =
cpu_to_le16(HCLGE_COMM_RSS_SET_BITMAP_MSK);
for (j = 0; j < HCLGE_COMM_RSS_CFG_TBL_SIZE; j++) {
qid = indir[i * HCLGE_COMM_RSS_CFG_TBL_SIZE + j];
req->rss_qid_l[j] = qid & 0xff;
hclge_comm_append_rss_msb_info(req, qid, j);
}
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
"failed to configure rss table, ret = %d.\n",
ret);
return ret;
}
}
return 0;
}
int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,
struct hclge_comm_hw *hw, bool is_pf,
struct hclge_comm_rss_cfg *rss_cfg)
{
struct hclge_comm_rss_input_tuple_cmd *req;
struct hclge_desc desc;
int ret;
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE,
false);
req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;
req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
if (is_pf)
hclge_comm_get_rss_type(nic, &rss_cfg->rss_tuple_sets);
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret)
dev_err(&hw->cmq.csq.pdev->dev,
"failed to configure rss input, ret = %d.\n", ret);
return ret;
}
void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
u8 *hfunc)
{
/* Get hash algorithm */
if (hfunc) {
switch (rss_cfg->rss_algo) {
case HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ:
*hfunc = ETH_RSS_HASH_TOP;
break;
case HCLGE_COMM_RSS_HASH_ALGO_SIMPLE:
*hfunc = ETH_RSS_HASH_XOR;
break;
default:
*hfunc = ETH_RSS_HASH_UNKNOWN;
break;
}
}
/* Get the RSS Key required by the user */
if (key)
memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE);
}
void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
u32 *indir, u16 rss_ind_tbl_size)
{
u16 i;
if (!indir)
return;
for (i = 0; i < rss_ind_tbl_size; i++)
indir[i] = rss_cfg->rss_indirection_tbl[i];
}
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
const u8 *key)
{
struct hclge_comm_rss_config_cmd *req;
unsigned int key_offset = 0;
struct hclge_desc desc;
int key_counts;
int key_size;
int ret;
key_counts = HCLGE_COMM_RSS_KEY_SIZE;
req = (struct hclge_comm_rss_config_cmd *)desc.data;
while (key_counts) {
hclge_comm_cmd_setup_basic_desc(&desc,
HCLGE_OPC_RSS_GENERIC_CONFIG,
false);
req->hash_config |= (hfunc & HCLGE_COMM_RSS_HASH_ALGO_MASK);
req->hash_config |=
(key_offset << HCLGE_COMM_RSS_HASH_KEY_OFFSET_B);
key_size = min(HCLGE_COMM_RSS_HASH_KEY_NUM, key_counts);
memcpy(req->hash_key,
key + key_offset * HCLGE_COMM_RSS_HASH_KEY_NUM,
key_size);
key_counts -= key_size;
key_offset++;
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
"failed to configure RSS key, ret = %d.\n",
ret);
return ret;
}
}
return 0;
}
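With the constants used above (HCLGE_COMM_RSS_KEY_SIZE = 40,
HCLGE_COMM_RSS_HASH_KEY_NUM = 16) the loop always issues three commands; a
worked trace rather than new code:

	/* iteration 1: key_offset = 0, key_size = 16, copies bytes  0..15
	 * iteration 2: key_offset = 1, key_size = 16, copies bytes 16..31
	 * iteration 3: key_offset = 2, key_size =  8, copies bytes 32..39
	 */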
static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_COMM_S_PORT_BIT : 0;
if (nfc->data & RXH_L4_B_2_3)
hash_sets |= HCLGE_COMM_D_PORT_BIT;
else
hash_sets &= ~HCLGE_COMM_D_PORT_BIT;
if (nfc->data & RXH_IP_SRC)
hash_sets |= HCLGE_COMM_S_IP_BIT;
else
hash_sets &= ~HCLGE_COMM_S_IP_BIT;
if (nfc->data & RXH_IP_DST)
hash_sets |= HCLGE_COMM_D_IP_BIT;
else
hash_sets &= ~HCLGE_COMM_D_IP_BIT;
if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
hash_sets |= HCLGE_COMM_V_TAG_BIT;
return hash_sets;
}
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
struct ethtool_rxnfc *nfc,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_input_tuple_cmd *req)
{
u8 tuple_sets;
req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
tuple_sets = hclge_comm_get_rss_hash_bits(nfc);
switch (nfc->flow_type) {
case TCP_V4_FLOW:
req->ipv4_tcp_en = tuple_sets;
break;
case TCP_V6_FLOW:
req->ipv6_tcp_en = tuple_sets;
break;
case UDP_V4_FLOW:
req->ipv4_udp_en = tuple_sets;
break;
case UDP_V6_FLOW:
req->ipv6_udp_en = tuple_sets;
break;
case SCTP_V4_FLOW:
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
if (ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
(nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
break;
case IPV4_FLOW:
req->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
break;
case IPV6_FLOW:
req->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
break;
default:
return -EINVAL;
}
return 0;
}
u64 hclge_comm_convert_rss_tuple(u8 tuple_sets)
{
u64 tuple_data = 0;
if (tuple_sets & HCLGE_COMM_D_PORT_BIT)
tuple_data |= RXH_L4_B_2_3;
if (tuple_sets & HCLGE_COMM_S_PORT_BIT)
tuple_data |= RXH_L4_B_0_1;
if (tuple_sets & HCLGE_COMM_D_IP_BIT)
tuple_data |= RXH_IP_DST;
if (tuple_sets & HCLGE_COMM_S_IP_BIT)
tuple_data |= RXH_IP_SRC;
return tuple_data;
}
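For instance, using the bit definitions from hclge_comm_rss.h below:

	/* HCLGE_COMM_RSS_INPUT_TUPLE_OTHER = GENMASK(3, 0) sets the S/D
	 * port and S/D IP bits, so hclge_comm_convert_rss_tuple() returns
	 * RXH_L4_B_0_1 | RXH_L4_B_2_3 | RXH_IP_SRC | RXH_IP_DST for it
	 */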
/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2021-2021 Hisilicon Limited.
#ifndef __HCLGE_COMM_RSS_H
#define __HCLGE_COMM_RSS_H
#include <linux/types.h>
#include "hnae3.h"
#include "hclge_comm_cmd.h"
#define HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGE_COMM_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_COMM_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGE_COMM_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGE_COMM_D_PORT_BIT BIT(0)
#define HCLGE_COMM_S_PORT_BIT BIT(1)
#define HCLGE_COMM_D_IP_BIT BIT(2)
#define HCLGE_COMM_S_IP_BIT BIT(3)
#define HCLGE_COMM_V_TAG_BIT BIT(4)
#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT \
(HCLGE_COMM_D_IP_BIT | HCLGE_COMM_S_IP_BIT | HCLGE_COMM_V_TAG_BIT)
#define HCLGE_COMM_MAX_TC_NUM 8
#define HCLGE_COMM_RSS_TC_OFFSET_S 0
#define HCLGE_COMM_RSS_TC_OFFSET_M GENMASK(10, 0)
#define HCLGE_COMM_RSS_TC_SIZE_MSB_B 11
#define HCLGE_COMM_RSS_TC_SIZE_S 12
#define HCLGE_COMM_RSS_TC_SIZE_M GENMASK(14, 12)
#define HCLGE_COMM_RSS_TC_VALID_B 15
#define HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET 3
struct hclge_comm_rss_tuple_cfg {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
};
#define HCLGE_COMM_RSS_KEY_SIZE 40
#define HCLGE_COMM_RSS_CFG_TBL_SIZE 16
#define HCLGE_COMM_RSS_CFG_TBL_BW_H 2U
#define HCLGE_COMM_RSS_CFG_TBL_BW_L 8U
#define HCLGE_COMM_RSS_CFG_TBL_SIZE_H 4
#define HCLGE_COMM_RSS_SET_BITMAP_MSK GENMASK(15, 0)
#define HCLGE_COMM_RSS_HASH_ALGO_MASK GENMASK(3, 0)
#define HCLGE_COMM_RSS_HASH_KEY_OFFSET_B 4
#define HCLGE_COMM_RSS_HASH_KEY_NUM 16
struct hclge_comm_rss_config_cmd {
u8 hash_config;
u8 rsv[7];
u8 hash_key[HCLGE_COMM_RSS_HASH_KEY_NUM];
};
struct hclge_comm_rss_cfg {
u8 rss_hash_key[HCLGE_COMM_RSS_KEY_SIZE]; /* user configured hash keys */
/* shadow table */
u16 *rss_indirection_tbl;
u32 rss_algo;
struct hclge_comm_rss_tuple_cfg rss_tuple_sets;
u32 rss_size;
};
struct hclge_comm_rss_input_tuple_cmd {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
u8 rsv[16];
};
struct hclge_comm_rss_ind_tbl_cmd {
__le16 start_table_index;
__le16 rss_set_bitmap;
u8 rss_qid_h[HCLGE_COMM_RSS_CFG_TBL_SIZE_H];
u8 rss_qid_l[HCLGE_COMM_RSS_CFG_TBL_SIZE];
};
struct hclge_comm_rss_tc_mode_cmd {
__le16 rss_tc_mode[HCLGE_COMM_MAX_TC_NUM];
u8 rsv[8];
};
u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle);
void hclge_comm_get_rss_type(struct hnae3_handle *nic,
struct hclge_comm_rss_tuple_cfg *rss_tuple_sets);
void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_cfg *rss_cfg);
int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
u8 *tuple_sets);
int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
const u8 hfunc, u8 *hash_algo);
void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
u8 *hfunc);
void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
u32 *indir, u16 rss_ind_tbl_size);
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
const u8 *key);
int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
struct ethtool_rxnfc *nfc,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_input_tuple_cmd *req);
u64 hclge_comm_convert_rss_tuple(u8 tuple_sets);
int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,
struct hclge_comm_hw *hw, bool is_pf,
struct hclge_comm_rss_cfg *rss_cfg);
int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw, const u16 *indir);
int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
struct hnae3_ae_dev *ae_dev,
struct hclge_comm_rss_cfg *rss_cfg);
void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
u16 *tc_valid, u16 *tc_size);
int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
u16 *tc_valid, u16 *tc_size);
int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
struct hclge_comm_hw *hw, const u8 *key,
const u8 hfunc);
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg,
struct ethtool_rxnfc *nfc);
#endif
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2021-2021 Hisilicon Limited.
#include <linux/err.h>
#include "hnae3.h"
#include "hclge_comm_cmd.h"
#include "hclge_comm_tqp_stats.h"
u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_comm_tqp *tqp;
u64 *buff = data;
u16 i;
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
}
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
}
return buff;
}
int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
return kinfo->num_tqps * HCLGE_COMM_QUEUE_PAIR_SIZE;
}
u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
u8 *buff = data;
u16 i;
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_comm_tqp *tqp =
container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", tqp->index);
buff += ETH_GSTRING_LEN;
}
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_comm_tqp *tqp =
container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", tqp->index);
buff += ETH_GSTRING_LEN;
}
return buff;
}
int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
struct hclge_comm_hw *hw)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_comm_tqp *tqp;
struct hclge_desc desc;
int ret;
u16 i;
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_RX_STATS,
true);
desc.data[0] = cpu_to_le32(tqp->index);
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
"failed to get tqp stat, ret = %d, tx = %u.\n",
ret, i);
return ret;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
le32_to_cpu(desc.data[1]);
hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_TX_STATS,
true);
desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
ret = hclge_comm_cmd_send(hw, &desc, 1);
if (ret) {
dev_err(&hw->cmq.csq.pdev->dev,
"failed to get tqp stat, ret = %d, rx = %u.\n",
ret, i);
return ret;
}
tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
le32_to_cpu(desc.data[1]);
}
return 0;
}
void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_comm_tqp *tqp;
struct hnae3_queue *queue;
u16 i;
for (i = 0; i < kinfo->num_tqps; i++) {
queue = kinfo->tqp[i];
tqp = container_of(queue, struct hclge_comm_tqp, q);
memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
}
}
/* SPDX-License-Identifier: GPL-2.0+ */
// Copyright (c) 2021-2021 Hisilicon Limited.
#ifndef __HCLGE_COMM_TQP_STATS_H
#define __HCLGE_COMM_TQP_STATS_H
#include <linux/types.h>
#include <linux/etherdevice.h>
#include "hnae3.h"
/* each tqp has TX & RX two queues */
#define HCLGE_COMM_QUEUE_PAIR_SIZE 2
/* TQP stats */
struct hclge_comm_tqp_stats {
/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};
struct hclge_comm_tqp {
/* copy of device pointer from pci_dev,
* used when perform DMA mapping
*/
struct device *dev;
struct hnae3_queue q;
struct hclge_comm_tqp_stats tqp_stats;
u16 index; /* Global index in a NIC controller */
bool alloced;
};
u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data);
int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle);
u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data);
void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle);
int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
struct hclge_comm_hw *hw);
#endif
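These helpers map one-to-one onto the ethtool callbacks, as the PF diff below
shows; a condensed sketch of the expected wiring:

	/* .get_sset_count -> hclge_comm_tqps_get_sset_count(handle)
	 * .get_strings    -> hclge_comm_tqps_get_strings(handle, p)
	 * .get_stats      -> hclge_comm_tqps_update_stats(handle, hw);
	 *                    then hclge_comm_tqps_get_stats(handle, data)
	 * the string order (all txq*_pktnum_rcd, then all rxq*_pktnum_rcd)
	 * matches the order in which hclge_comm_tqps_get_stats() emits values
	 */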
@@ -20,252 +20,8 @@ struct hclge_misc_vector {
char name[HNAE3_INT_NAME_LEN];
};
enum hclge_opcode_type {
/* Generic commands */
HCLGE_OPC_QUERY_FW_VER = 0x0001,
HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
HCLGE_OPC_GBL_RST_STATUS = 0x0021,
HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022,
HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
HCLGE_OPC_GET_CFG_PARAM = 0x0025,
HCLGE_OPC_PF_RST_DONE = 0x0026,
HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
HCLGE_OPC_STATS_64_BIT = 0x0030,
HCLGE_OPC_STATS_32_BIT = 0x0031,
HCLGE_OPC_STATS_MAC = 0x0032,
HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033,
HCLGE_OPC_STATS_MAC_ALL = 0x0034,
HCLGE_OPC_QUERY_REG_NUM = 0x0040,
HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
HCLGE_OPC_DFX_BD_NUM = 0x0043,
HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044,
HCLGE_OPC_DFX_SSU_REG_0 = 0x0045,
HCLGE_OPC_DFX_SSU_REG_1 = 0x0046,
HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047,
HCLGE_OPC_DFX_RPU_REG_0 = 0x0048,
HCLGE_OPC_DFX_RPU_REG_1 = 0x0049,
HCLGE_OPC_DFX_NCSI_REG = 0x004A,
HCLGE_OPC_DFX_RTC_REG = 0x004B,
HCLGE_OPC_DFX_PPP_REG = 0x004C,
HCLGE_OPC_DFX_RCB_REG = 0x004D,
HCLGE_OPC_DFX_TQP_REG = 0x004E,
HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
/* MAC command */
HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
/* PTP commands */
HCLGE_OPC_PTP_INT_EN = 0x0501,
HCLGE_OPC_PTP_MODE_CFG = 0x0507,
/* PFC/Pause commands */
HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
HCLGE_OPC_CFG_MAC_PARA = 0x0703,
HCLGE_OPC_CFG_PFC_PARA = 0x0704,
HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705,
HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706,
HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707,
HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708,
HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709,
HCLGE_OPC_QOS_MAP = 0x070A,
/* ETS/scheduler commands */
HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804,
HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805,
HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806,
HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807,
HCLGE_OPC_TM_PORT_WEIGHT = 0x0808,
HCLGE_OPC_TM_PG_WEIGHT = 0x0809,
HCLGE_OPC_TM_QS_WEIGHT = 0x080A,
HCLGE_OPC_TM_PRI_WEIGHT = 0x080B,
HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C,
HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D,
HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E,
HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F,
HCLGE_OPC_TM_PORT_SHAPPING = 0x0810,
HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812,
HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
HCLGE_OPC_TM_NODES = 0x0816,
HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
HCLGE_OPC_QSET_DFX_STS = 0x0844,
HCLGE_OPC_PRI_DFX_STS = 0x0845,
HCLGE_OPC_PG_DFX_STS = 0x0846,
HCLGE_OPC_PORT_DFX_STS = 0x0847,
HCLGE_OPC_SCH_NQ_CNT = 0x0848,
HCLGE_OPC_SCH_RQ_CNT = 0x0849,
HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
/* Packet buffer allocate commands */
HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904,
HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
/* TQP management command */
HCLGE_OPC_SET_TQP_MAP = 0x0A01,
/* TQP commands */
HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
/* PPU commands */
HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
/* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS commands */
HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
HCLGE_OPC_RSS_TC_MODE = 0x0D08,
HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02,
/* Promiscuous mode command */
HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
/* Vlan offload commands */
HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
/* Interrupts commands */
HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
/* MAC commands */
HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
/* MAC VLAN commands */
HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
/* VLAN commands */
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
HCLGE_OPC_PORT_VLAN_BYPASS = 0x1103,
/* Flow Director commands */
HCLGE_OPC_FD_MODE_CTRL = 0x1200,
HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
HCLGE_OPC_FD_TCAM_OP = 0x1203,
HCLGE_OPC_FD_AD_OP = 0x1204,
HCLGE_OPC_FD_CNT_OP = 0x1205,
HCLGE_OPC_FD_USER_DEF_OP = 0x1207,
/* MDIO command */
HCLGE_OPC_MDIO_CONFIG = 0x1900,
/* QCN commands */
HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03,
HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
/* Mailbox command */
HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
/* Led command */
HCLGE_OPC_LED_STATUS_CFG = 0xB000,
/* clear hardware resource command */
HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B,
/* NCL config command */
HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
/* IMP stats command */
HCLGE_OPC_IMP_STATS_BD = 0x7012,
HCLGE_OPC_IMP_STATS_INFO = 0x7013,
HCLGE_OPC_IMP_COMPAT_CFG = 0x701A,
/* SFP command */
HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
HCLGE_OPC_GET_SFP_EXIST = 0x7101,
HCLGE_OPC_GET_SFP_INFO = 0x7104,
/* Error INT commands */
HCLGE_MAC_COMMON_INT_EN = 0x030E,
HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
HCLGE_SSU_ECC_INT_CMD = 0x0989,
HCLGE_SSU_COMMON_INT_CMD = 0x098C,
HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40,
HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41,
HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42,
HCLGE_COMMON_ECC_INT_CFG = 0x1505,
HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510,
HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512,
HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
HCLGE_QUERY_ALL_ERR_BD_NUM = 0x1516,
HCLGE_QUERY_ALL_ERR_INFO = 0x1517,
HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
HCLGE_IGU_COMMON_INT_EN = 0x1806,
HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
HCLGE_PPP_CMD0_INT_CMD = 0x2100,
HCLGE_PPP_CMD1_INT_CMD = 0x2101,
HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
HCLGE_NCSI_INT_EN = 0x2401,
/* PHY command */
HCLGE_OPC_PHY_LINK_KSETTING = 0x7025,
HCLGE_OPC_PHY_REG = 0x7026,
/* Query link diagnosis info command */
HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A,
};
#define hclge_cmd_setup_basic_desc(desc, opcode, is_read) \
hclge_comm_cmd_setup_basic_desc(desc, (enum hclge_comm_opcode_type)opcode, \
is_read)
hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read)
#define HCLGE_TQP_REG_OFFSET 0x80000
#define HCLGE_TQP_REG_SIZE 0x200
@@ -481,38 +237,10 @@ struct hclge_vf_num_cmd {
};
#define HCLGE_RSS_DEFAULT_OUTPORT_B 4
#define HCLGE_RSS_HASH_KEY_OFFSET_B 4
#define HCLGE_RSS_HASH_KEY_NUM 16
struct hclge_rss_config_cmd {
u8 hash_config;
u8 rsv[7];
u8 hash_key[HCLGE_RSS_HASH_KEY_NUM];
};
struct hclge_rss_input_tuple_cmd {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
u8 rsv[16];
};
#define HCLGE_RSS_CFG_TBL_SIZE 16
#define HCLGE_RSS_CFG_TBL_SIZE_H 4
#define HCLGE_RSS_CFG_TBL_BW_H 2U
#define HCLGE_RSS_CFG_TBL_BW_L 8U
struct hclge_rss_indirection_table_cmd {
__le16 start_table_index;
__le16 rss_set_bitmap;
u8 rss_qid_h[HCLGE_RSS_CFG_TBL_SIZE_H];
u8 rss_qid_l[HCLGE_RSS_CFG_TBL_SIZE];
};
#define HCLGE_RSS_TC_OFFSET_S 0
#define HCLGE_RSS_TC_OFFSET_M GENMASK(10, 0)
#define HCLGE_RSS_TC_SIZE_MSB_B 11
@@ -520,10 +248,6 @@ struct hclge_rss_indirection_table_cmd {
#define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12)
#define HCLGE_RSS_TC_SIZE_MSB_OFFSET 3
#define HCLGE_RSS_TC_VALID_B 15
struct hclge_rss_tc_mode_cmd {
__le16 rss_tc_mode[HCLGE_MAX_TC_NUM];
u8 rsv[8];
};
#define HCLGE_LINK_STATUS_UP_B 0
#define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B)
@@ -203,7 +203,7 @@ static int hclge_map_update(struct hclge_dev *hdev)
if (ret)
return ret;
hclge_rss_indir_init_cfg(hdev);
hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);
return hclge_rss_init_hw(hdev);
}
@@ -371,14 +371,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
},
};
static const u8 hclge_hash_key[] = {
0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
static const u32 hclge_dfx_bd_offset_list[] = {
HCLGE_DFX_BIOS_BD_OFFSET,
HCLGE_DFX_SSU_0_BD_OFFSET,
@@ -490,7 +482,7 @@ static const struct key_info tuple_key_info[] = {
**/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
return hclge_comm_cmd_send(&hw->hw, desc, num, true);
return hclge_comm_cmd_send(&hw->hw, desc, num);
}
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
@@ -619,111 +611,6 @@ int hclge_mac_update_stats(struct hclge_dev *hdev)
return hclge_mac_update_stats_defective(hdev);
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct hnae3_queue *queue;
struct hclge_desc desc[1];
struct hclge_tqp *tqp;
int ret, i;
for (i = 0; i < kinfo->num_tqps; i++) {
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_IGU_STAT */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
true);
desc[0].data[0] = cpu_to_le32(tqp->index);
ret = hclge_cmd_send(&hdev->hw, desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"Query tqp stat fail, status = %d,queue = %d\n",
ret, i);
return ret;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
le32_to_cpu(desc[0].data[1]);
}
for (i = 0; i < kinfo->num_tqps; i++) {
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_IGU_STAT */
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_QUERY_TX_STATS,
true);
desc[0].data[0] = cpu_to_le32(tqp->index);
ret = hclge_cmd_send(&hdev->hw, desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"Query tqp stat fail, status = %d,queue = %d\n",
ret, i);
return ret;
}
tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
le32_to_cpu(desc[0].data[1]);
}
return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_tqp *tqp;
u64 *buff = data;
int i;
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
}
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
}
return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
/* each tqp has TX & RX two queues */
return kinfo->num_tqps * (2);
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
u8 *buff = data;
int i;
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclge_tqp, q);
snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
struct hclge_tqp, q);
snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
return buff;
}
static int hclge_comm_get_count(struct hclge_dev *hdev,
const struct hclge_comm_stats_str strs[],
u32 size)
@@ -784,7 +671,7 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
handle = &hdev->vport[0].nic;
if (handle->client) {
status = hclge_tqps_update_stats(handle);
status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status) {
dev_err(&hdev->pdev->dev,
"Update TQPS stats fail, status = %d.\n",
@@ -814,7 +701,7 @@ static void hclge_update_stats(struct hnae3_handle *handle,
"Update MAC stats fail, status = %d.\n",
status);
status = hclge_tqps_update_stats(handle);
status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status)
dev_err(&hdev->pdev->dev,
"Update TQPS stats fail, status = %d.\n",
@@ -863,7 +750,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
} else if (stringset == ETH_SS_STATS) {
count = hclge_comm_get_count(hdev, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string)) +
hclge_tqps_get_sset_count(handle, stringset);
hclge_comm_tqps_get_sset_count(handle);
}
return count;
@@ -881,7 +768,7 @@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
size = ARRAY_SIZE(g_mac_stats_string);
p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
size, p);
p = hclge_tqps_get_strings(handle, p);
p = hclge_comm_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
@@ -915,7 +802,7 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
p = hclge_comm_get_stats(hdev, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
p = hclge_tqps_get_stats(handle, p);
p = hclge_comm_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
@@ -1495,7 +1382,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
@@ -1535,7 +1422,7 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
if (!dev_specs->rss_ind_tbl_size)
dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
if (!dev_specs->rss_key_size)
dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
if (!dev_specs->max_tm_rate)
dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
if (!dev_specs->max_qset_num)
@@ -1756,11 +1643,11 @@ static int hclge_config_gro(struct hclge_dev *hdev)
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
struct hclge_tqp *tqp;
struct hclge_comm_tqp *tqp;
int i;
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
sizeof(struct hclge_tqp), GFP_KERNEL);
sizeof(struct hclge_comm_tqp), GFP_KERNEL);
if (!hdev->htqp)
return -ENOMEM;
@@ -1884,8 +1771,8 @@ static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
kinfo = &nic->kinfo;
for (i = 0; i < vport->alloc_tqps; i++) {
struct hclge_tqp *q =
container_of(kinfo->tqp[i], struct hclge_tqp, q);
struct hclge_comm_tqp *q =
container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
bool is_pf;
int ret;
@@ -4719,334 +4606,43 @@ static int hclge_put_vector(struct hnae3_handle *handle, int vector)
return 0;
}
static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
{
return HCLGE_RSS_KEY_SIZE;
}
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
const u8 hfunc, const u8 *key)
{
struct hclge_rss_config_cmd *req;
unsigned int key_offset = 0;
struct hclge_desc desc;
int key_counts;
int key_size;
int ret;
key_counts = HCLGE_RSS_KEY_SIZE;
req = (struct hclge_rss_config_cmd *)desc.data;
while (key_counts) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
false);
req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
memcpy(req->hash_key,
key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
key_counts -= key_size;
key_offset++;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"Configure RSS config fail, status = %d\n",
ret);
return ret;
}
}
return 0;
}
static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
{
struct hclge_rss_indirection_table_cmd *req;
struct hclge_desc desc;
int rss_cfg_tbl_num;
u8 rss_msb_oft;
u8 rss_msb_val;
int ret;
u16 qid;
int i;
u32 j;
req = (struct hclge_rss_indirection_table_cmd *)desc.data;
rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
HCLGE_RSS_CFG_TBL_SIZE;
for (i = 0; i < rss_cfg_tbl_num; i++) {
hclge_cmd_setup_basic_desc
(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
req->start_table_index =
cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
req->rss_qid_l[j] = qid & 0xff;
rss_msb_oft =
j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"Configure rss indir table fail,status = %d\n",
ret);
return ret;
}
}
return 0;
}
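
The function just removed splits every indirection-table entry across two descriptor fields: the low byte of the queue id goes into rss_qid_l[j], while the queue id's most-significant bit is bit-packed into the shared rss_qid_h byte array. A worked sketch of that packing, assuming the mainline widths HCLGE_RSS_CFG_TBL_BW_H = 2 and HCLGE_RSS_CFG_TBL_BW_L = 8 (the macro values themselves are not visible in this hunk):

```c
#include <linux/bits.h>
#include <linux/types.h>

/* Sketch: pack one 9-bit queue id into the split low/high arrays used
 * by HCLGE_OPC_RSS_INDIR_TABLE; widths are assumed as noted above.
 */
static void sketch_pack_qid(u8 *qid_l, u8 *qid_h, u32 j, u16 qid)
{
	u8 oft = j * 2 / BITS_PER_BYTE;		/* which high byte */
	u8 shift = j * 2 % BITS_PER_BYTE;	/* bit position in it */

	qid_l[j] = qid & 0xff;			/* low 8 bits */
	qid_h[oft] |= ((qid >> 8) & 0x1) << shift;	/* 9th bit */
}
/* e.g. j = 5, qid = 300 (0x12C): qid_l[5] = 0x2C, qid_h[1] |= BIT(2) */
```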
static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
u16 *tc_size, u16 *tc_offset)
{
struct hclge_rss_tc_mode_cmd *req;
struct hclge_desc desc;
int ret;
int i;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
req = (struct hclge_rss_tc_mode_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
u16 mode = 0;
hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
HCLGE_RSS_TC_SIZE_S, tc_size[i]);
hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
req->rss_tc_mode[i] = cpu_to_le16(mode);
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"Configure rss tc mode fail, status = %d\n", ret);
return ret;
}
static void hclge_get_rss_type(struct hclge_vport *vport)
{
if (vport->rss_tuple_sets.ipv4_tcp_en ||
vport->rss_tuple_sets.ipv4_udp_en ||
vport->rss_tuple_sets.ipv4_sctp_en ||
vport->rss_tuple_sets.ipv6_tcp_en ||
vport->rss_tuple_sets.ipv6_udp_en ||
vport->rss_tuple_sets.ipv6_sctp_en)
vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
else if (vport->rss_tuple_sets.ipv4_fragment_en ||
vport->rss_tuple_sets.ipv6_fragment_en)
vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
else
vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
}
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
struct hclge_rss_input_tuple_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
req = (struct hclge_rss_input_tuple_cmd *)desc.data;
/* Get the tuple cfg from pf */
req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
hclge_get_rss_type(&hdev->vport[0]);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"Configure rss input fail, status = %d\n", ret);
return ret;
}
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
u8 *key, u8 *hfunc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
int i;
/* Get hash algorithm */
if (hfunc) {
switch (vport->rss_algo) {
case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
*hfunc = ETH_RSS_HASH_TOP;
break;
case HCLGE_RSS_HASH_ALGO_SIMPLE:
*hfunc = ETH_RSS_HASH_XOR;
break;
default:
*hfunc = ETH_RSS_HASH_UNKNOWN;
break;
}
}
struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;
/* Get the RSS Key required by the user */
if (key)
memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
/* Get indirect table */
if (indir)
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
indir[i] = vport->rss_indirection_tbl[i];
hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
ae_dev->dev_specs.rss_ind_tbl_size);
return 0;
}
static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
u8 *hash_algo)
{
switch (hfunc) {
case ETH_RSS_HASH_TOP:
*hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
return 0;
case ETH_RSS_HASH_XOR:
*hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
return 0;
case ETH_RSS_HASH_NO_CHANGE:
*hash_algo = vport->rss_algo;
return 0;
default:
return -EINVAL;
}
}
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u8 hash_algo;
struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret, i;
ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
if (ret) {
dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
return ret;
}
/* Set the RSS Hash Key if specified by the user */
if (key) {
ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
if (ret)
return ret;
/* Update the shadow RSS key with user specified qids */
memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
} else {
ret = hclge_set_rss_algo_key(hdev, hash_algo,
vport->rss_hash_key);
if (ret)
return ret;
}
vport->rss_algo = hash_algo;
/* Update the shadow RSS table with user specified qids */
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
vport->rss_indirection_tbl[i] = indir[i];
rss_cfg->rss_indirection_tbl[i] = indir[i];
/* Update the hardware */
return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
}
static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
if (nfc->data & RXH_L4_B_2_3)
hash_sets |= HCLGE_D_PORT_BIT;
else
hash_sets &= ~HCLGE_D_PORT_BIT;
if (nfc->data & RXH_IP_SRC)
hash_sets |= HCLGE_S_IP_BIT;
else
hash_sets &= ~HCLGE_S_IP_BIT;
if (nfc->data & RXH_IP_DST)
hash_sets |= HCLGE_D_IP_BIT;
else
hash_sets &= ~HCLGE_D_IP_BIT;
if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
hash_sets |= HCLGE_V_TAG_BIT;
return hash_sets;
}
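
hclge_get_rss_hash_bits() (and its VF twin further down) translates ethtool's RXH_* request bits into the device's per-tuple enable bits, with the VLAN-tag bit forced on for SCTP flows. A usage example of the helper above:

```c
#include <linux/ethtool.h>

/* An "sdfn" request on tcp4 (ethtool -N eth0 rx-flow-hash tcp4 sdfn)
 * sets all four src/dst IP and L4 port bits.
 */
static u8 sketch_tcp4_sdfn_bits(void)
{
	struct ethtool_rxnfc nfc = {
		.flow_type = TCP_V4_FLOW,
		.data = RXH_IP_SRC | RXH_IP_DST |
			RXH_L4_B_0_1 | RXH_L4_B_2_3,
	};

	/* == HCLGE_S_IP_BIT | HCLGE_D_IP_BIT |
	 *    HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT
	 */
	return hclge_get_rss_hash_bits(&nfc);
}
```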
static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
struct ethtool_rxnfc *nfc,
struct hclge_rss_input_tuple_cmd *req)
{
struct hclge_dev *hdev = vport->back;
u8 tuple_sets;
req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
tuple_sets = hclge_get_rss_hash_bits(nfc);
switch (nfc->flow_type) {
case TCP_V4_FLOW:
req->ipv4_tcp_en = tuple_sets;
break;
case TCP_V6_FLOW:
req->ipv6_tcp_en = tuple_sets;
break;
case UDP_V4_FLOW:
req->ipv4_udp_en = tuple_sets;
break;
case UDP_V6_FLOW:
req->ipv6_udp_en = tuple_sets;
break;
case SCTP_V4_FLOW:
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
(nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
break;
case IPV4_FLOW:
req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
break;
case IPV6_FLOW:
req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
break;
default:
return -EINVAL;
}
return 0;
return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
rss_cfg->rss_indirection_tbl);
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
......@@ -5054,92 +4650,20 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct hclge_rss_input_tuple_cmd *req;
struct hclge_desc desc;
int ret;
if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3))
return -EINVAL;
req = (struct hclge_rss_input_tuple_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init rss tuple cmd, ret = %d\n", ret);
return ret;
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
&hdev->rss_cfg, nfc);
if (ret) {
dev_err(&hdev->pdev->dev,
"Set rss tuple fail, status = %d\n", ret);
"failed to set rss tuple, ret = %d.\n", ret);
return ret;
}
vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
hclge_get_rss_type(vport);
return 0;
}
static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
u8 *tuple_sets)
{
switch (flow_type) {
case TCP_V4_FLOW:
*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
break;
case UDP_V4_FLOW:
*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
break;
case TCP_V6_FLOW:
*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
break;
case UDP_V6_FLOW:
*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
break;
case SCTP_V4_FLOW:
*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
break;
case SCTP_V6_FLOW:
*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
break;
case IPV4_FLOW:
case IPV6_FLOW:
*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
break;
default:
return -EINVAL;
}
hclge_comm_get_rss_type(&vport->nic, &hdev->rss_cfg.rss_tuple_sets);
return 0;
}
static u64 hclge_convert_rss_tuple(u8 tuple_sets)
{
u64 tuple_data = 0;
if (tuple_sets & HCLGE_D_PORT_BIT)
tuple_data |= RXH_L4_B_2_3;
if (tuple_sets & HCLGE_S_PORT_BIT)
tuple_data |= RXH_L4_B_0_1;
if (tuple_sets & HCLGE_D_IP_BIT)
tuple_data |= RXH_IP_DST;
if (tuple_sets & HCLGE_S_IP_BIT)
tuple_data |= RXH_IP_SRC;
return tuple_data;
}
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
struct ethtool_rxnfc *nfc)
{
......@@ -5149,11 +4673,12 @@ static int hclge_get_rss_tuple(struct hnae3_handle *handle,
nfc->data = 0;
ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
&tuple_sets);
if (ret || !tuple_sets)
return ret;
nfc->data = hclge_convert_rss_tuple(tuple_sets);
nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
return 0;
}
......@@ -5206,78 +4731,35 @@ static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
tc_offset[i] = tc_info->tqp_offset[i];
}
return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
tc_size);
}
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
u16 *rss_indir = vport[0].rss_indirection_tbl;
u8 *key = vport[0].rss_hash_key;
u8 hfunc = vport[0].rss_algo;
u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
u8 *key = hdev->rss_cfg.rss_hash_key;
u8 hfunc = hdev->rss_cfg.rss_algo;
int ret;
ret = hclge_set_rss_indir_table(hdev, rss_indir);
ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
rss_indir);
if (ret)
return ret;
ret = hclge_set_rss_algo_key(hdev, hfunc, key);
ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
if (ret)
return ret;
ret = hclge_set_rss_input_tuple(hdev);
ret = hclge_comm_set_rss_input_tuple(&hdev->vport[0].nic,
&hdev->hw.hw, true,
&hdev->rss_cfg);
if (ret)
return ret;
return hclge_init_rss_tc_mode(hdev);
}
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
{
struct hclge_vport *vport = &hdev->vport[0];
int i;
for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
}
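
The default table simply round-robins entries over the allocated RSS queues, and the common replacement has to reproduce exactly this spread. A minimal sketch:

```c
#include <linux/types.h>

/* Default spread: entry i -> queue (i % rss_size).
 * With rss_size = 4: indir = { 0, 1, 2, 3, 0, 1, 2, 3, ... }
 */
static void sketch_rss_indir_init(u16 *indir, u16 tbl_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < tbl_size; i++)
		indir[i] = i % rss_size;
}
```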
static int hclge_rss_init_cfg(struct hclge_dev *hdev)
{
u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
struct hclge_vport *vport = &hdev->vport[0];
u16 *rss_ind_tbl;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
vport->rss_tuple_sets.ipv6_sctp_en =
hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
HCLGE_RSS_INPUT_TUPLE_SCTP;
vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
vport->rss_algo = rss_algo;
rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
sizeof(*rss_ind_tbl), GFP_KERNEL);
if (!rss_ind_tbl)
return -ENOMEM;
vport->rss_indirection_tbl = rss_ind_tbl;
memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
hclge_rss_indir_init_cfg(hdev);
return 0;
}
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
int vector_id, bool en,
struct hnae3_ring_chain_node *ring_chain)
......@@ -8282,22 +7764,6 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev)
HNAE3_LOOP_PARALLEL_SERDES);
}
static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo;
struct hnae3_queue *queue;
struct hclge_tqp *tqp;
int i;
kinfo = &vport->nic.kinfo;
for (i = 0; i < kinfo->num_tqps; i++) {
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
}
}
static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT 100000
......@@ -8339,7 +7805,7 @@ static int hclge_ae_start(struct hnae3_handle *handle)
hdev->hw.mac.link = 0;
/* reset tqp stats */
hclge_reset_tqp_stats(handle);
hclge_comm_reset_tqp_stats(handle);
hclge_mac_start_phy(hdev);
......@@ -8377,7 +7843,7 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
hclge_mac_stop_phy(hdev);
/* reset tqp stats */
hclge_reset_tqp_stats(handle);
hclge_comm_reset_tqp_stats(handle);
hclge_update_link_status(hdev);
}
......@@ -10990,11 +10456,11 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
struct hclge_comm_tqp *tqp;
struct hnae3_queue *queue;
struct hclge_tqp *tqp;
queue = handle->kinfo.tqp[queue_id];
tqp = container_of(queue, struct hclge_tqp, q);
tqp = container_of(queue, struct hclge_comm_tqp, q);
return tqp->index;
}
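
Both the queue-id lookup above and the inline hclge_get_queue_id() below depend only on the hnae3_queue being embedded in the TQP structure, which is why a single common struct hclge_comm_tqp can replace the PF and VF variants. The pattern in isolation (illustrative types, not the driver's):

```c
#include <linux/kernel.h>
#include <linux/types.h>

struct sketch_queue { int rsvd; };

struct sketch_tqp {
	struct sketch_queue q;	/* embedded generic queue handle */
	u16 index;		/* global index in the NIC controller */
};

/* Recover the owning TQP (and its global id) from the embedded queue. */
static u16 sketch_get_queue_id(struct sketch_queue *queue)
{
	struct sketch_tqp *tqp = container_of(queue, struct sketch_tqp, q);

	return tqp->index;
}
```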
......@@ -11883,7 +11349,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
ret = hclge_rss_init_cfg(hdev);
ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
&hdev->rss_cfg);
if (ret) {
dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
goto err_mdiobus_unreg;
......@@ -11968,7 +11435,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
err_msi_uninit:
pci_free_irq_vectors(pdev);
err_cmd_uninit:
hclge_comm_cmd_uninit(hdev->ae_dev, true, &hdev->hw.hw);
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_devlink_uninit:
hclge_devlink_uninit(hdev);
err_pci_uninit:
......@@ -12360,7 +11827,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_config_nic_hw_error(hdev, false);
hclge_config_rocee_ras_interrupt(hdev, false);
hclge_comm_cmd_uninit(hdev->ae_dev, true, &hdev->hw.hw);
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
hclge_misc_irq_uninit(hdev);
hclge_devlink_uninit(hdev);
hclge_pci_uninit(hdev);
......@@ -12420,7 +11887,8 @@ static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
tc_offset[i] = vport->nic.kinfo.rss_size * i;
}
return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
tc_size);
}
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
......@@ -13213,7 +12681,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.check_port_speed = hclge_check_port_speed,
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
.get_rss_key_size = hclge_get_rss_key_size,
.get_rss_key_size = hclge_comm_get_rss_key_size,
.get_rss = hclge_get_rss,
.set_rss = hclge_set_rss,
.set_rss_tuple = hclge_set_rss_tuple,
......
......@@ -13,6 +13,8 @@
#include "hclge_cmd.h"
#include "hclge_ptp.h"
#include "hnae3.h"
#include "hclge_comm_rss.h"
#include "hclge_comm_tqp_stats.h"
#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"
......@@ -80,22 +82,6 @@
#define HCLGE_TQP_INTR_RL_REG 0x20900
#define HCLGE_RSS_IND_TBL_SIZE 512
#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE 40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGE_D_PORT_BIT BIT(0)
#define HCLGE_S_PORT_BIT BIT(1)
#define HCLGE_D_IP_BIT BIT(2)
#define HCLGE_S_IP_BIT BIT(3)
#define HCLGE_V_TAG_BIT BIT(4)
#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \
(HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
......@@ -285,26 +271,6 @@ struct hclge_hw {
int num_vec;
};
/* TQP stats */
struct hlcge_tqp_stats {
/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};
struct hclge_tqp {
/* copy of device pointer from pci_dev,
* used when perform DMA mapping
*/
struct device *dev;
struct hnae3_queue q;
struct hlcge_tqp_stats tqp_stats;
u16 index; /* Global index in a NIC controller */
bool alloced;
};
enum hclge_fc_mode {
HCLGE_FC_NONE,
HCLGE_FC_RX_PAUSE,
......@@ -909,7 +875,7 @@ struct hclge_dev {
bool cur_promisc;
int num_alloc_vfs; /* Actual number of VFs allocated */
struct hclge_tqp *htqp;
struct hclge_comm_tqp *htqp;
struct hclge_vport *vport;
struct dentry *hclge_dbgfs;
......@@ -968,6 +934,7 @@ struct hclge_dev {
cpumask_t affinity_mask;
struct hclge_ptp *ptp;
struct devlink *devlink;
struct hclge_comm_rss_cfg rss_cfg;
};
/* VPort level vlan tag configuration for TX direction */
......@@ -994,17 +961,6 @@ struct hclge_rx_vtag_cfg {
bool strip_tag2_discard_en; /* Outer vlan tag discard for BD enable */
};
struct hclge_rss_tuple_cfg {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
};
enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_ALIVE,
HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
......@@ -1038,15 +994,6 @@ struct hclge_vf_info {
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
/* User configured lookup table entries */
u16 *rss_indirection_tbl;
int rss_algo; /* User configured hash algorithm */
/* User configured rss tuple sets */
struct hclge_rss_tuple_cfg rss_tuple_sets;
u16 alloc_rss_size;
u16 qs_offset;
u32 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
......@@ -1107,7 +1054,8 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
struct hclge_comm_tqp *tqp =
container_of(queue, struct hclge_comm_tqp, q);
return tqp->index;
}
......@@ -1125,7 +1073,6 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle);
......
......@@ -4,6 +4,7 @@
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclge_comm_rss.h"
#define CREATE_TRACE_POINTS
#include "hclge_trace.h"
......@@ -612,15 +613,17 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
{
#define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back;
struct hclge_comm_rss_cfg *rss_cfg;
u8 index;
index = mbx_req->msg.data[0];
rss_cfg = &hdev->rss_cfg;
/* Check the query index of rss_hash_key from VF, make sure no
* more than the size of rss_hash_key.
*/
if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
sizeof(vport[0].rss_hash_key)) {
sizeof(rss_cfg->rss_hash_key)) {
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
......@@ -628,7 +631,7 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
}
memcpy(resp_msg->data,
&hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
&rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
}
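
The VF fetches the 40-byte hash key over the mailbox in fixed 8-byte chunks, passing the chunk index in the request; the check above rejects any index whose chunk would read past the key. With an 8-byte response and a 40-byte key that is five round trips (indices 0..4), which is exactly the msg_num the VF-side loop later in this diff computes. A sketch of the arithmetic:

```c
#include <linux/types.h>

#define SKETCH_KEY_SIZE		40	/* HCLGE_COMM_RSS_KEY_SIZE */
#define SKETCH_RESP_LEN		8	/* HCLGE_RSS_MBX_RESP_LEN */

/* Number of mailbox round trips for the whole key: ceil(40 / 8) = 5. */
static u16 sketch_msg_num(void)
{
	return (SKETCH_KEY_SIZE + SKETCH_RESP_LEN - 1) / SKETCH_RESP_LEN;
}

/* PF-side bounds check, matching the handler above:
 * chunk 'index' must satisfy (index + 1) * 8 <= 40, i.e. index <= 4.
 */
static bool sketch_index_valid(u8 index)
{
	return (index + 1) * SKETCH_RESP_LEN <= SKETCH_KEY_SIZE;
}
```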
......
......@@ -678,8 +678,8 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
hclge_tm_update_kinfo_rss_size(vport);
kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
vport->dwrr = 100; /* 100 percent as init */
vport->alloc_rss_size = kinfo->rss_size;
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
hdev->rss_cfg.rss_size = kinfo->rss_size;
/* when enable mqprio, the tc_info has been updated. */
if (kinfo->tc_info.mqprio_active)
......
......@@ -16,30 +16,6 @@ struct hclgevf_dev;
#define HCLGEVF_SYNC_RX_RING_HEAD_EN_B 4
enum hclgevf_opcode_type {
/* Generic command */
HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024,
HCLGEVF_OPC_QUERY_DEV_SPECS = 0x0050,
/* TQP command */
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
/* GRO command */
HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS cmd */
HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
/* Mailbox cmd */
HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
/* IMP stats command */
HCLGEVF_OPC_IMP_COMPAT_CFG = 0x701A,
};
#define HCLGEVF_TQP_REG_OFFSET 0x80000
#define HCLGEVF_TQP_REG_SIZE 0x200
......@@ -97,50 +73,6 @@ struct hclgevf_cfg_gro_status_cmd {
u8 rsv[23];
};
#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
struct hclgevf_rss_config_cmd {
u8 hash_config;
u8 rsv[7];
u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
};
struct hclgevf_rss_input_tuple_cmd {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
u8 rsv[16];
};
#define HCLGEVF_RSS_CFG_TBL_SIZE 16
struct hclgevf_rss_indirection_table_cmd {
__le16 start_table_index;
__le16 rss_set_bitmap;
u8 rsv[4];
u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
};
#define HCLGEVF_RSS_TC_OFFSET_S 0
#define HCLGEVF_RSS_TC_OFFSET_M GENMASK(10, 0)
#define HCLGEVF_RSS_TC_SIZE_MSB_B 11
#define HCLGEVF_RSS_TC_SIZE_S 12
#define HCLGEVF_RSS_TC_SIZE_M GENMASK(14, 12)
#define HCLGEVF_RSS_TC_VALID_B 15
#define HCLGEVF_MAX_TC_NUM 8
#define HCLGEVF_RSS_TC_SIZE_MSB_OFFSET 3
struct hclgevf_rss_tc_mode_cmd {
__le16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
u8 rsv[8];
};
#define HCLGEVF_LINK_STS_B 0
#define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B)
struct hclgevf_link_status_cmd {
......@@ -177,8 +109,7 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4
#define hclgevf_cmd_setup_basic_desc(desc, opcode, is_read) \
hclge_comm_cmd_setup_basic_desc(desc, (enum hclge_comm_opcode_type)opcode, \
is_read)
hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read)
struct hclgevf_dev_specs_0_cmd {
__le32 rsv0;
......
......@@ -9,6 +9,7 @@
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclgevf_devlink.h"
#include "hclge_comm_rss.h"
#define HCLGEVF_NAME "hclgevf"
......@@ -30,14 +31,6 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = {
{0, }
};
static const u8 hclgevf_hash_key[] = {
0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
......@@ -102,7 +95,7 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
*/
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
{
return hclge_comm_cmd_send(&hw->hw, desc, num, false);
return hclge_comm_cmd_send(&hw->hw, desc, num);
}
void hclgevf_arq_init(struct hclgevf_dev *hdev)
......@@ -128,108 +121,13 @@ static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
return container_of(handle, struct hclgevf_dev, nic);
}
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclge_desc desc;
struct hclgevf_tqp *tqp;
int status;
int i;
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
hclgevf_cmd_setup_basic_desc(&desc,
HCLGEVF_OPC_QUERY_RX_STATUS,
true);
desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status) {
dev_err(&hdev->pdev->dev,
"Query tqp stat fail, status = %d,queue = %d\n",
status, i);
return status;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
le32_to_cpu(desc.data[1]);
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
true);
desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status) {
dev_err(&hdev->pdev->dev,
"Query tqp stat fail, status = %d,queue = %d\n",
status, i);
return status;
}
tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
le32_to_cpu(desc.data[1]);
}
return 0;
}
static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_tqp *tqp;
u64 *buff = data;
int i;
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
}
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
}
return buff;
}
static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
return kinfo->num_tqps * 2;
}
static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
u8 *buff = data;
int i;
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
struct hclgevf_tqp, q);
snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
tqp->index);
buff += ETH_GSTRING_LEN;
}
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
struct hclgevf_tqp, q);
snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
tqp->index);
buff += ETH_GSTRING_LEN;
}
return buff;
}
static void hclgevf_update_stats(struct hnae3_handle *handle,
struct net_device_stats *net_stats)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int status;
status = hclgevf_tqps_update_stats(handle);
status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status)
dev_err(&hdev->pdev->dev,
"VF update of TQPS stats fail, status = %d.\n",
......@@ -241,7 +139,7 @@ static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
if (strset == ETH_SS_TEST)
return -EOPNOTSUPP;
else if (strset == ETH_SS_STATS)
return hclgevf_tqps_get_sset_count(handle, strset);
return hclge_comm_tqps_get_sset_count(handle);
return 0;
}
......@@ -252,12 +150,12 @@ static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
u8 *p = (char *)data;
if (strset == ETH_SS_STATS)
p = hclgevf_tqps_get_strings(handle, p);
p = hclge_comm_tqps_get_strings(handle, p);
}
static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
hclgevf_tqps_get_stats(handle, data);
hclge_comm_tqps_get_stats(handle, data);
}
static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
......@@ -423,11 +321,11 @@ static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
struct hclgevf_tqp *tqp;
struct hclge_comm_tqp *tqp;
int i;
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
sizeof(struct hclgevf_tqp), GFP_KERNEL);
sizeof(struct hclge_comm_tqp), GFP_KERNEL);
if (!hdev->htqp)
return -ENOMEM;
......@@ -474,7 +372,7 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
kinfo->num_tx_desc = hdev->num_tx_desc;
kinfo->num_rx_desc = hdev->num_rx_desc;
kinfo->rx_buf_len = hdev->rx_buf_len;
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++)
if (hdev->hw_tc_map & BIT(i))
num_tc++;
......@@ -632,137 +530,11 @@ static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
return -EINVAL;
}
static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
const u8 hfunc, const u8 *key)
{
struct hclgevf_rss_config_cmd *req;
unsigned int key_offset = 0;
struct hclge_desc desc;
int key_counts;
int key_size;
int ret;
key_counts = HCLGEVF_RSS_KEY_SIZE;
req = (struct hclgevf_rss_config_cmd *)desc.data;
while (key_counts) {
hclgevf_cmd_setup_basic_desc(&desc,
HCLGEVF_OPC_RSS_GENERIC_CONFIG,
false);
req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
req->hash_config |=
(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
memcpy(req->hash_key,
key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
key_counts -= key_size;
key_offset++;
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"Configure RSS config fail, status = %d\n",
ret);
return ret;
}
}
return 0;
}
static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
return HCLGEVF_RSS_KEY_SIZE;
}
static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
struct hclgevf_rss_indirection_table_cmd *req;
struct hclge_desc desc;
int rss_cfg_tbl_num;
int status;
int i, j;
req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
HCLGEVF_RSS_CFG_TBL_SIZE;
for (i = 0; i < rss_cfg_tbl_num; i++) {
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
false);
req->start_table_index =
cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE);
req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK);
for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
req->rss_result[j] =
indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status) {
dev_err(&hdev->pdev->dev,
"VF failed(=%d) to set RSS indirection table\n",
status);
return status;
}
}
return 0;
}
static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
struct hclgevf_rss_tc_mode_cmd *req;
u16 tc_offset[HCLGEVF_MAX_TC_NUM];
u16 tc_valid[HCLGEVF_MAX_TC_NUM];
u16 tc_size[HCLGEVF_MAX_TC_NUM];
struct hclge_desc desc;
u16 roundup_size;
unsigned int i;
int status;
req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
roundup_size = roundup_pow_of_two(rss_size);
roundup_size = ilog2(roundup_size);
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
tc_valid[i] = 1;
tc_size[i] = roundup_size;
tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
}
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
u16 mode = 0;
hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B,
(tc_valid[i] & 0x1));
hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET &
0x1);
hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
req->rss_tc_mode[i] = cpu_to_le16(mode);
}
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
dev_err(&hdev->pdev->dev,
"VF failed(=%d) to set rss tc mode\n", status);
return status;
}
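
Each __le16 in rss_tc_mode[] packs the TC's queue offset (bits 10:0), its queue count as a power-of-two exponent (low three bits in 14:12, the fourth bit in the separate MSB bit 11), and a valid flag (bit 15) — hence the roundup_pow_of_two()/ilog2() pair above. A worked sketch using the HCLGEVF_RSS_TC_* layout shown earlier in this diff:

```c
#include <linux/bits.h>
#include <linux/log2.h>
#include <linux/types.h>

/* Worked example of the mode-word packing: rss_size 24 rounds up to 32
 * and is stored as exponent 5; rss_size 256 gives exponent 8, whose
 * fourth bit lands in the MSB bit.
 */
static u16 sketch_tc_mode(u16 rss_size, u16 offset, bool valid)
{
	u16 exp = ilog2(roundup_pow_of_two(rss_size));
	u16 mode = 0;

	mode |= offset & GENMASK(10, 0);	/* TC_OFFSET */
	mode |= ((exp >> 3) & 0x1) << 11;	/* TC_SIZE_MSB */
	mode |= (exp & 0x7) << 12;		/* TC_SIZE */
	mode |= (valid ? 1 : 0) << 15;		/* TC_VALID */

	return mode;
}
```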
/* for revision 0x20, vf shares the same rss config with pf */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN 8
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
struct hclge_vf_to_pf_msg send_msg;
u16 msg_num, hash_key_index;
......@@ -770,7 +542,7 @@ static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
int ret;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
HCLGEVF_RSS_MBX_RESP_LEN;
for (index = 0; index < msg_num; index++) {
send_msg.data[0] = index;
......@@ -787,7 +559,7 @@ static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
if (index == msg_num - 1)
memcpy(&rss_cfg->rss_hash_key[hash_key_index],
&resp_msg[0],
HCLGEVF_RSS_KEY_SIZE - hash_key_index);
HCLGE_COMM_RSS_KEY_SIZE - hash_key_index);
else
memcpy(&rss_cfg->rss_hash_key[hash_key_index],
&resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
......@@ -800,29 +572,11 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
u8 *hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
int i, ret;
struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
/* Get hash algorithm */
if (hfunc) {
switch (rss_cfg->hash_algo) {
case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
*hfunc = ETH_RSS_HASH_TOP;
break;
case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
*hfunc = ETH_RSS_HASH_XOR;
break;
default:
*hfunc = ETH_RSS_HASH_UNKNOWN;
break;
}
}
/* Get the RSS Key required by the user */
if (key)
memcpy(key, rss_cfg->rss_hash_key,
HCLGEVF_RSS_KEY_SIZE);
hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
} else {
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
......@@ -831,67 +585,28 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
if (ret)
return ret;
memcpy(key, rss_cfg->rss_hash_key,
HCLGEVF_RSS_KEY_SIZE);
HCLGE_COMM_RSS_KEY_SIZE);
}
}
if (indir)
for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
indir[i] = rss_cfg->rss_indirection_tbl[i];
hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
hdev->ae_dev->dev_specs.rss_ind_tbl_size);
return 0;
}
static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
u8 *hash_algo)
{
switch (hfunc) {
case ETH_RSS_HASH_TOP:
*hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
return 0;
case ETH_RSS_HASH_XOR:
*hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
return 0;
case ETH_RSS_HASH_NO_CHANGE:
*hash_algo = hdev->rss_cfg.hash_algo;
return 0;
default:
return -EINVAL;
}
}
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
u8 hash_algo;
struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret, i;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key,
hfunc);
if (ret)
return ret;
/* Set the RSS Hash Key if specified by the user */
if (key) {
ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
if (ret) {
dev_err(&hdev->pdev->dev,
"invalid hfunc type %u\n", hfunc);
return ret;
}
/* Update the shadow RSS key with user specified qids */
memcpy(rss_cfg->rss_hash_key, key,
HCLGEVF_RSS_KEY_SIZE);
} else {
ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
rss_cfg->rss_hash_key);
if (ret)
return ret;
}
rss_cfg->hash_algo = hash_algo;
}
/* update the shadow RSS table with user specified qids */
......@@ -899,179 +614,26 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
rss_cfg->rss_indirection_tbl[i] = indir[i];
/* update the hardware */
return hclgevf_set_rss_indir_table(hdev);
}
static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;
if (nfc->data & RXH_L4_B_2_3)
hash_sets |= HCLGEVF_D_PORT_BIT;
else
hash_sets &= ~HCLGEVF_D_PORT_BIT;
if (nfc->data & RXH_IP_SRC)
hash_sets |= HCLGEVF_S_IP_BIT;
else
hash_sets &= ~HCLGEVF_S_IP_BIT;
if (nfc->data & RXH_IP_DST)
hash_sets |= HCLGEVF_D_IP_BIT;
else
hash_sets &= ~HCLGEVF_D_IP_BIT;
if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
hash_sets |= HCLGEVF_V_TAG_BIT;
return hash_sets;
}
static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
struct ethtool_rxnfc *nfc,
struct hclgevf_rss_input_tuple_cmd *req)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
u8 tuple_sets;
req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
tuple_sets = hclgevf_get_rss_hash_bits(nfc);
switch (nfc->flow_type) {
case TCP_V4_FLOW:
req->ipv4_tcp_en = tuple_sets;
break;
case TCP_V6_FLOW:
req->ipv6_tcp_en = tuple_sets;
break;
case UDP_V4_FLOW:
req->ipv4_udp_en = tuple_sets;
break;
case UDP_V6_FLOW:
req->ipv6_udp_en = tuple_sets;
break;
case SCTP_V4_FLOW:
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
(nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
break;
case IPV4_FLOW:
req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
break;
case IPV6_FLOW:
req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
break;
default:
return -EINVAL;
}
return 0;
return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
rss_cfg->rss_indirection_tbl);
}
static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
struct ethtool_rxnfc *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
struct hclgevf_rss_input_tuple_cmd *req;
struct hclge_desc desc;
int ret;
if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
if (nfc->data &
~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
return -EINVAL;
req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init rss tuple cmd, ret = %d\n", ret);
return ret;
}
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
&hdev->rss_cfg, nfc);
if (ret)
dev_err(&hdev->pdev->dev,
"Set rss tuple fail, status = %d\n", ret);
return ret;
}
"failed to set rss tuple, ret = %d.\n", ret);
rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
return 0;
}
static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev,
int flow_type, u8 *tuple_sets)
{
switch (flow_type) {
case TCP_V4_FLOW:
*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en;
break;
case UDP_V4_FLOW:
*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en;
break;
case TCP_V6_FLOW:
*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en;
break;
case UDP_V6_FLOW:
*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en;
break;
case SCTP_V4_FLOW:
*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en;
break;
case SCTP_V6_FLOW:
*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en;
break;
case IPV4_FLOW:
case IPV6_FLOW:
*tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
break;
default:
return -EINVAL;
}
return 0;
}
static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
{
u64 tuple_data = 0;
if (tuple_sets & HCLGEVF_D_PORT_BIT)
tuple_data |= RXH_L4_B_2_3;
if (tuple_sets & HCLGEVF_S_PORT_BIT)
tuple_data |= RXH_L4_B_0_1;
if (tuple_sets & HCLGEVF_D_IP_BIT)
tuple_data |= RXH_IP_DST;
if (tuple_sets & HCLGEVF_S_IP_BIT)
tuple_data |= RXH_IP_SRC;
return tuple_data;
return ret;
}
static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
......@@ -1086,47 +648,20 @@ static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
nfc->data = 0;
ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type,
&tuple_sets);
ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type,
&tuple_sets);
if (ret || !tuple_sets)
return ret;
nfc->data = hclgevf_convert_rss_tuple(tuple_sets);
nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
return 0;
}
static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
struct hclgevf_rss_cfg *rss_cfg)
{
struct hclgevf_rss_input_tuple_cmd *req;
struct hclge_desc desc;
int ret;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"Configure rss input fail, status = %d\n", ret);
return ret;
}
static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
return rss_cfg->rss_size;
}
......@@ -1303,8 +838,7 @@ static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
false);
hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
req->stream_id = cpu_to_le16(stream_id);
if (enable)
......@@ -1328,18 +862,6 @@ static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
return 0;
}
static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_tqp *tqp;
int i;
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
}
}
static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
struct hclge_vf_to_pf_msg send_msg;
......@@ -2403,7 +1925,7 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
}
if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
hclgevf_tqps_update_stats(handle);
hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
/* VF does not need to request link status when this bit is set, because
* PF will push its link status to VFs when link status changed.
......@@ -2604,7 +2126,7 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev)
if (!hnae3_dev_gro_supported(hdev))
return 0;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
false);
req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
......@@ -2618,71 +2140,37 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev)
return ret;
}
static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
{
u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
struct hclgevf_rss_tuple_cfg *tuple_sets;
u32 i;
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
tuple_sets = &rss_cfg->rss_tuple_sets;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
u8 *rss_ind_tbl;
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
sizeof(*rss_ind_tbl), GFP_KERNEL);
if (!rss_ind_tbl)
return -ENOMEM;
rss_cfg->rss_indirection_tbl = rss_ind_tbl;
memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
HCLGEVF_RSS_KEY_SIZE);
tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_sctp_en =
hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
HCLGEVF_RSS_INPUT_TUPLE_SCTP;
tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
}
/* Initialize RSS indirect table */
for (i = 0; i < rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
return 0;
}
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
int ret;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
rss_cfg->rss_hash_key);
ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw,
rss_cfg->rss_algo,
rss_cfg->rss_hash_key);
if (ret)
return ret;
ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
ret = hclge_comm_set_rss_input_tuple(&hdev->nic, &hdev->hw.hw,
false, rss_cfg);
if (ret)
return ret;
}
ret = hclgevf_set_rss_indir_table(hdev);
ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
rss_cfg->rss_indirection_tbl);
if (ret)
return ret;
return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map,
tc_offset, tc_valid, tc_size);
return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
tc_valid, tc_size);
}
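
hclge_comm_get_rss_tc_info() centralises the per-TC table that the deleted hclgevf_set_rss_tc_mode() computed inline. A minimal sketch of the assumed contract, reconstructed from that deleted code rather than copied from the common module:

```c
#include <linux/bits.h>
#include <linux/log2.h>
#include <linux/types.h>

#define SKETCH_MAX_TC_NUM	8	/* HCLGE_COMM_MAX_TC_NUM */

/* Same size exponent for every TC; offsets spaced by rss_size for the
 * TCs present in hw_tc_map, as in the deleted VF code above.
 */
static void sketch_get_rss_tc_info(u16 rss_size, u8 hw_tc_map,
				   u16 *tc_offset, u16 *tc_valid,
				   u16 *tc_size)
{
	u16 exp = ilog2(roundup_pow_of_two(rss_size));
	unsigned int i;

	for (i = 0; i < SKETCH_MAX_TC_NUM; i++) {
		tc_valid[i] = 1;
		tc_size[i] = exp;
		tc_offset[i] = (hw_tc_map & BIT(i)) ? rss_size * i : 0;
	}
}
```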
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
......@@ -2736,7 +2224,7 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);
hclgevf_reset_tqp_stats(handle);
hclge_comm_reset_tqp_stats(handle);
hclgevf_request_link_info(hdev);
......@@ -2754,7 +2242,7 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
if (hdev->reset_type != HNAE3_VF_RESET)
hclgevf_reset_tqp(handle);
hclgevf_reset_tqp_stats(handle);
hclge_comm_reset_tqp_stats(handle);
hclgevf_update_link_status(hdev, 0);
}
......@@ -3149,7 +2637,7 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
struct hclge_desc desc;
int ret;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true);
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
......@@ -3203,7 +2691,7 @@ static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
ae_dev->dev_specs.max_non_tso_bd_num =
HCLGEVF_MAX_NON_TSO_BD_NUM;
ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
......@@ -3236,7 +2724,7 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
if (!dev_specs->rss_ind_tbl_size)
dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
if (!dev_specs->rss_key_size)
dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
if (!dev_specs->max_frm_size)
......@@ -3259,11 +2747,10 @@ static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
hclgevf_cmd_setup_basic_desc(&desc[i],
HCLGEVF_OPC_QUERY_DEV_SPECS, true);
HCLGE_OPC_QUERY_DEV_SPECS, true);
desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS,
true);
hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
if (ret)
......@@ -3451,7 +2938,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_cfg(hdev);
ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev,
&hdev->rss_cfg);
if (ret) {
dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
goto err_config;
......@@ -3498,7 +2986,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
err_cmd_init:
hclge_comm_cmd_uninit(hdev->ae_dev, false, &hdev->hw.hw);
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_cmd_queue_init:
hclgevf_devlink_uninit(hdev);
err_devlink_init:
......@@ -3522,7 +3010,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
hclgevf_uninit_msi(hdev);
}
hclge_comm_cmd_uninit(hdev->ae_dev, false, &hdev->hw.hw);
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
hclgevf_devlink_uninit(hdev);
hclgevf_pci_uninit(hdev);
hclgevf_uninit_mac_list(hdev);
......@@ -3625,6 +3113,9 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
u16 cur_rss_size = kinfo->rss_size;
u16 cur_tqps = kinfo->num_tqps;
u32 *rss_indir;
......@@ -3633,7 +3124,10 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
hclgevf_update_rss_size(handle, new_tqps_num);
ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
hclge_comm_get_rss_tc_info(cur_rss_size, hdev->hw_tc_map,
tc_offset, tc_valid, tc_size);
ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
tc_valid, tc_size);
if (ret)
return ret;
......@@ -3892,7 +3386,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.update_stats = hclgevf_update_stats,
.get_strings = hclgevf_get_strings,
.get_sset_count = hclgevf_get_sset_count,
.get_rss_key_size = hclgevf_get_rss_key_size,
.get_rss_key_size = hclge_comm_get_rss_key_size,
.get_rss = hclgevf_get_rss,
.set_rss = hclgevf_set_rss,
.get_rss_tuple = hclgevf_get_rss_tuple,
......
......@@ -10,6 +10,8 @@
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
#include "hnae3.h"
#include "hclge_comm_rss.h"
#include "hclge_comm_tqp_stats.h"
#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"
......@@ -93,22 +95,6 @@
#define HCLGEVF_WAIT_RESET_DONE 100
#define HCLGEVF_RSS_IND_TBL_SIZE 512
#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
#define HCLGEVF_RSS_KEY_SIZE 40
#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1
#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGEVF_D_PORT_BIT BIT(0)
#define HCLGEVF_S_PORT_BIT BIT(1)
#define HCLGEVF_D_IP_BIT BIT(2)
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
(HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
#define HCLGEVF_MAC_MAX_FRAME 9728
......@@ -163,23 +149,6 @@ struct hclgevf_hw {
struct hclgevf_mac mac;
};
/* TQP stats */
struct hlcgevf_tqp_stats {
/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
};
struct hclgevf_tqp {
struct device *dev; /* device for DMA mapping */
struct hnae3_queue q;
struct hlcgevf_tqp_stats tqp_stats;
u16 index; /* global index in a NIC controller */
bool alloced;
};
struct hclgevf_cfg {
u8 tc_num;
u16 tqp_desc_num;
......@@ -190,27 +159,6 @@ struct hclgevf_cfg {
u32 numa_node_map;
};
struct hclgevf_rss_tuple_cfg {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
u8 ipv4_sctp_en;
u8 ipv4_fragment_en;
u8 ipv6_tcp_en;
u8 ipv6_udp_en;
u8 ipv6_sctp_en;
u8 ipv6_fragment_en;
};
struct hclgevf_rss_cfg {
u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
u32 hash_algo;
u32 rss_size;
u8 hw_tc_map;
/* shadow table */
u8 *rss_indirection_tbl;
struct hclgevf_rss_tuple_cfg rss_tuple_sets;
};
struct hclgevf_misc_vector {
u8 __iomem *addr;
int vector_irq;
......@@ -255,7 +203,7 @@ struct hclgevf_dev {
struct hnae3_ae_dev *ae_dev;
struct hclgevf_hw hw;
struct hclgevf_misc_vector misc_vector;
struct hclgevf_rss_cfg rss_cfg;
struct hclge_comm_rss_cfg rss_cfg;
unsigned long state;
unsigned long flr_state;
unsigned long default_reset_request;
......@@ -306,7 +254,7 @@ struct hclgevf_dev {
struct delayed_work service_task;
struct hclgevf_tqp *htqp;
struct hclge_comm_tqp *htqp;
struct hnae3_handle nic;
struct hnae3_handle roce;
......