Commit 78936acc authored by Jakub Kicinski

Merge branch 'net-hns3-updates-for-next'

Huazhong Tan says:

====================
net: hns3: updates for -next

This series adds some code optimizations and compatibility
handling for the HNS3 ethernet driver.

change log:
V2: refactor #2 as Jakub Kicinski suggested and remove the part
    about RSS size, which does not differ across hardware versions.
    update netdev->max_mtu as well in #4, as reported by Jakub Kicinski.

previous version:
V1: https://patchwork.kernel.org/project/netdevbpf/cover/1612269593-18691-1-git-send-email-tanhuazhong@huawei.com/
====================

Link: https://lore.kernel.org/r/1612513969-9278-1-git-send-email-tanhuazhong@huawei.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 74c05b9f 3f094bd1
......@@ -284,6 +284,8 @@ struct hnae3_dev_specs {
u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */
u16 max_int_gl; /* max value of interrupt coalesce based on INT_GL */
u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
u16 max_frm_size;
u16 max_qset_num;
};
struct hnae3_client_ops {
......@@ -410,8 +412,6 @@ struct hnae3_ae_dev {
* Get the len of the regs dump
* get_rss_key_size()
* Get rss key size
* get_rss_indir_size()
* Get rss indirection table size
* get_rss()
* Get rss table
* set_rss()
......@@ -555,7 +555,6 @@ struct hnae3_ae_ops {
int (*get_regs_len)(struct hnae3_handle *handle);
u32 (*get_rss_key_size)(struct hnae3_handle *handle);
u32 (*get_rss_indir_size)(struct hnae3_handle *handle);
int (*get_rss)(struct hnae3_handle *handle, u32 *indir, u8 *key,
u8 *hfunc);
int (*set_rss)(struct hnae3_handle *handle, const u32 *indir,
......
......@@ -389,6 +389,9 @@ static void hns3_dbg_dev_specs(struct hnae3_handle *h)
kinfo->tc_info.num_tc);
dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max);
dev_info(priv->dev, "MAX INT GL: %u\n", dev_specs->max_int_gl);
dev_info(priv->dev, "MAX frame size: %u\n", dev_specs->max_frm_size);
dev_info(priv->dev, "MAX TM RATE: %uMbps\n", dev_specs->max_tm_rate);
dev_info(priv->dev, "MAX QSET number: %u\n", dev_specs->max_qset_num);
}
static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
......
......@@ -4281,8 +4281,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
hns3_dbg_init(handle);
/* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
netdev->max_mtu = HNS3_MAX_MTU;
netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size);
if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
......
......@@ -56,9 +56,8 @@ enum hns3_nic_state {
#define HNS3_RING_MIN_PENDING 72
#define HNS3_RING_BD_MULTIPLE 8
/* max frame size of mac */
#define HNS3_MAC_MAX_FRAME 9728
#define HNS3_MAX_MTU \
(HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
#define HNS3_MAX_MTU(max_frm_size) \
((max_frm_size) - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
......
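The hunk above turns the hard-coded HNS3_MAC_MAX_FRAME into a macro parameter, so netdev->max_mtu is derived from the frame size the firmware reports in dev_specs. A minimal user-space sketch (not part of the patch; the header-length constants are restated locally) shows the arithmetic behind the "9702" in the MTU-range comment earlier in the diff:

#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header */
#define ETH_FCS_LEN	4	/* frame check sequence */
#define VLAN_HLEN	4	/* one VLAN tag; two are reserved for QinQ */

#define HNS3_MAX_MTU(max_frm_size) \
	((max_frm_size) - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))

int main(void)
{
	unsigned int max_frm_size = 9728;	/* default dev_specs.max_frm_size */

	/* 9728 - (14 + 4 + 8) = 9702 */
	printf("max_mtu = %u\n", HNS3_MAX_MTU(max_frm_size));
	return 0;
}

Hardware that reports a different max_frm_size now changes the advertised MTU ceiling automatically instead of requiring a new compile-time constant.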
......@@ -859,11 +859,9 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
if (!h->ae_algo->ops->get_rss_indir_size)
return 0;
return h->ae_algo->ops->get_rss_indir_size(h);
return ae_dev->dev_specs.rss_ind_tbl_size;
}
static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
......
......@@ -363,6 +363,15 @@ static void hclge_parse_capability(struct hclge_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps);
}
static __le32 hclge_build_api_caps(void)
{
u32 api_caps = 0;
hnae3_set_bit(api_caps, HCLGE_API_CAP_FLEX_RSS_TBL_B, 1);
return cpu_to_le32(api_caps);
}
static enum hclge_cmd_status
hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
{
......@@ -373,6 +382,7 @@ hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
resp = (struct hclge_query_version_cmd *)desc.data;
resp->api_caps = hclge_build_api_caps();
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
......
......@@ -386,11 +386,15 @@ enum HCLGE_CAP_BITS {
HCLGE_CAP_UDP_TUNNEL_CSUM_B,
};
enum HCLGE_API_CAP_BITS {
HCLGE_API_CAP_FLEX_RSS_TBL_B,
};
#define HCLGE_QUERY_CAP_LENGTH 3
struct hclge_query_version_cmd {
__le32 firmware;
__le32 hardware;
__le32 rsv;
__le32 api_caps;
__le32 caps[HCLGE_QUERY_CAP_LENGTH]; /* capabilities of device */
};
......@@ -1127,7 +1131,8 @@ struct hclge_dev_specs_0_cmd {
#define HCLGE_DEF_MAX_INT_GL 0x1FE0U
struct hclge_dev_specs_1_cmd {
__le32 rsv0;
__le16 max_frm_size;
__le16 max_qset_num;
__le16 max_int_gl;
u8 rsv1[18];
};
......
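For context, the api_caps word added to the query-version command is how the driver advertises HCLGE_API_CAP_FLEX_RSS_TBL_B to the firmware, i.e. that it no longer assumes the old fixed 512-entry RSS indirection table. A minimal stand-alone sketch (not driver code; plain bit operations stand in for hnae3_set_bit and cpu_to_le32):

#include <stdint.h>
#include <stdio.h>

#define API_CAP_FLEX_RSS_TBL_B	0	/* bit 0, mirroring the enum above */

static uint32_t build_api_caps(void)
{
	uint32_t api_caps = 0;

	api_caps |= 1U << API_CAP_FLEX_RSS_TBL_B;	/* "flexible RSS table" supported */
	return api_caps;	/* the driver stores cpu_to_le32(api_caps) in the command */
}

int main(void)
{
	printf("api_caps = 0x%x\n", build_api_caps());	/* 0x1 */
	return 0;
}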
......@@ -1599,8 +1599,6 @@ static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
const char *cmd_buf)
{
#define HCLGE_MAX_QSET_NUM 1024
u16 qsid;
int ret;
......@@ -1610,9 +1608,9 @@ static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
return;
}
if (qsid >= HCLGE_MAX_QSET_NUM) {
dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n",
qsid);
if (qsid >= hdev->ae_dev->dev_specs.max_qset_num) {
dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-%u]\n",
qsid, hdev->ae_dev->dev_specs.max_qset_num - 1);
return;
}
......
......@@ -55,8 +55,6 @@
#define HCLGE_LINK_STATUS_MS 10
#define HCLGE_VF_VPORT_START_NUM 1
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
......@@ -1373,6 +1371,8 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
......@@ -1391,7 +1391,9 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
......@@ -1406,8 +1408,12 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
if (!dev_specs->max_tm_rate)
dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
if (!dev_specs->max_qset_num)
dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
if (!dev_specs->max_frm_size)
dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
......@@ -4237,11 +4243,6 @@ static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
return HCLGE_RSS_KEY_SIZE;
}
static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
{
return HCLGE_RSS_IND_TBL_SIZE;
}
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
const u8 hfunc, const u8 *key)
{
......@@ -4283,6 +4284,7 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
{
struct hclge_rss_indirection_table_cmd *req;
struct hclge_desc desc;
int rss_cfg_tbl_num;
u8 rss_msb_oft;
u8 rss_msb_val;
int ret;
......@@ -4291,8 +4293,10 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
u32 j;
req = (struct hclge_rss_indirection_table_cmd *)desc.data;
rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
HCLGE_RSS_CFG_TBL_SIZE;
for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
for (i = 0; i < rss_cfg_tbl_num; i++) {
hclge_cmd_setup_basic_desc
(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
......@@ -4398,6 +4402,7 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
u8 *key, u8 *hfunc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
int i;
......@@ -4422,7 +4427,7 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
/* Get indirect table */
if (indir)
for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
indir[i] = vport->rss_indirection_tbl[i];
return 0;
......@@ -4431,6 +4436,7 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u8 hash_algo;
......@@ -4462,7 +4468,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
/* Update the shadow RSS table with user specified qids */
for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
vport->rss_indirection_tbl[i] = indir[i];
/* Update the hardware */
......@@ -4703,14 +4709,15 @@ void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
int i, j;
for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
vport[j].rss_indirection_tbl[i] =
i % vport[j].alloc_rss_size;
}
}
static void hclge_rss_init_cfg(struct hclge_dev *hdev)
static int hclge_rss_init_cfg(struct hclge_dev *hdev)
{
u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
struct hclge_vport *vport = hdev->vport;
......@@ -4718,6 +4725,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
u16 *rss_ind_tbl;
vport[i].rss_tuple_sets.ipv4_tcp_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
vport[i].rss_tuple_sets.ipv4_udp_en =
......@@ -4739,11 +4748,19 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
vport[i].rss_algo = rss_algo;
rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
sizeof(*rss_ind_tbl), GFP_KERNEL);
if (!rss_ind_tbl)
return -ENOMEM;
vport[i].rss_indirection_tbl = rss_ind_tbl;
memcpy(vport[i].rss_hash_key, hclge_hash_key,
HCLGE_RSS_KEY_SIZE);
}
hclge_rss_indir_init_cfg(hdev);
return 0;
}
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
......@@ -9664,7 +9681,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* HW supports 2 layer vlan */
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
max_frm_size > HCLGE_MAC_MAX_FRAME)
max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
return -EINVAL;
max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
......@@ -10581,7 +10598,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
hclge_rss_init_cfg(hdev);
ret = hclge_rss_init_cfg(hdev);
if (ret) {
dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
goto err_mdiobus_unreg;
}
ret = hclge_rss_init_hw(hdev);
if (ret) {
dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
......@@ -11072,6 +11094,7 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
bool rxfh_configured)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
......@@ -11115,11 +11138,12 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
goto out;
/* Reinitializes the rss indirect table according to the new RSS size */
rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
GFP_KERNEL);
if (!rss_indir)
return -ENOMEM;
for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_indir[i] = i % kinfo->rss_size;
ret = hclge_set_rss(handle, rss_indir, NULL, 0);
......@@ -11799,7 +11823,6 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
.get_rss_key_size = hclge_get_rss_key_size,
.get_rss_indir_size = hclge_get_rss_indir_size,
.get_rss = hclge_get_rss,
.set_rss = hclge_set_rss,
.set_rss_tuple = hclge_set_rss_tuple,
......
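The dev-specs changes above follow a defensive pattern: hclge_set_default_dev_specs() seeds compile-time defaults, hclge_parse_dev_specs() overwrites them with firmware-reported values, and hclge_check_dev_specs() restores the defaults for any field an older firmware leaves at zero. A minimal user-space sketch of that fallback step (names and values mirror the driver, but the code is illustrative only):

#include <stdio.h>

#define HCLGE_MAC_MAX_FRAME	9728	/* compile-time default frame size */
#define HCLGE_MAX_QSET_NUM	1024	/* compile-time default qset count */

struct dev_specs {
	unsigned int max_frm_size;
	unsigned int max_qset_num;
};

static void check_dev_specs(struct dev_specs *specs)
{
	/* zero means "not reported": stay compatible with old firmware */
	if (!specs->max_frm_size)
		specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
	if (!specs->max_qset_num)
		specs->max_qset_num = HCLGE_MAX_QSET_NUM;
}

int main(void)
{
	struct dev_specs specs = { 0, 0 };	/* old firmware reports nothing */

	check_dev_specs(&specs);
	printf("max_frm_size=%u max_qset_num=%u\n",
	       specs.max_frm_size, specs.max_qset_num);	/* 9728 1024 */
	return 0;
}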
......@@ -17,6 +17,8 @@
#define HCLGE_MAX_PF_NUM 8
#define HCLGE_VF_VPORT_START_NUM 1
#define HCLGE_RD_FIRST_STATS_NUM 2
#define HCLGE_RD_OTHER_STATS_NUM 4
......@@ -97,8 +99,6 @@
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
#define HCLGE_RSS_CFG_TBL_NUM \
(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
......@@ -148,6 +148,8 @@
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
#define HCLGE_MAX_QSET_NUM 1024
enum HLCGE_PORT_TYPE {
HOST_PORT,
NETWORK_PORT
......@@ -922,7 +924,7 @@ struct hclge_vport {
u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
/* User configured lookup table entries */
u16 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
u16 *rss_indirection_tbl;
int rss_algo; /* User configured hash algorithm */
/* User configured rss tuple sets */
struct hclge_rss_tuple_cfg rss_tuple_sets;
......
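With rss_indirection_tbl turned into a pointer, the shadow table is sized from dev_specs.rss_ind_tbl_size at runtime (devm_kcalloc in hclge_rss_init_cfg()) instead of being a fixed 512-entry array, and hclge_set_rss_indir_table() pushes it to hardware in chunks of HCLGE_RSS_CFG_TBL_SIZE entries per command. A minimal user-space sketch of the sizing and round-robin fill (calloc stands in for devm_kcalloc; the table size, queue count, and 16-entry command granularity are illustrative assumptions):

#include <stdio.h>
#include <stdlib.h>

#define RSS_CFG_TBL_SIZE	16	/* assumed indirection entries per firmware command */

int main(void)
{
	unsigned int rss_ind_tbl_size = 512;	/* hypothetical dev_specs value */
	unsigned int rss_size = 8;		/* queues currently in use */
	unsigned short *tbl;
	unsigned int i;

	tbl = calloc(rss_ind_tbl_size, sizeof(*tbl));
	if (!tbl)
		return 1;

	/* spread queue ids round-robin, as hclge_rss_indir_init_cfg() does */
	for (i = 0; i < rss_ind_tbl_size; i++)
		tbl[i] = i % rss_size;

	printf("entries=%u, config commands=%u\n",
	       rss_ind_tbl_size, rss_ind_tbl_size / RSS_CFG_TBL_SIZE);

	free(tbl);
	return 0;
}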
......@@ -640,13 +640,18 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
/* TC configuration is shared by PF/VF in one port, only allow
* one tc for VF for simplicity. VF's vport_id is non zero.
*/
kinfo->tc_info.num_tc = vport->vport_id ? 1 :
if (vport->vport_id) {
kinfo->tc_info.num_tc = 1;
vport->qs_offset = HNAE3_MAX_TC +
vport->vport_id - HCLGE_VF_VPORT_START_NUM;
vport_max_rss_size = hdev->vf_rss_size_max;
} else {
kinfo->tc_info.num_tc =
min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
(vport->vport_id ? (vport->vport_id - 1) : 0);
vport->qs_offset = 0;
vport_max_rss_size = hdev->pf_rss_size_max;
}
vport_max_rss_size = vport->vport_id ? hdev->vf_rss_size_max :
hdev->pf_rss_size_max;
max_rss_size = min_t(u16, vport_max_rss_size,
hclge_vport_get_max_rss_size(vport));
......
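The hclge_tm.c hunk above replaces nested ternaries with an explicit PF/VF branch. Since HCLGE_VF_VPORT_START_NUM is 1, the VF qs_offset of HNAE3_MAX_TC + vport_id - HCLGE_VF_VPORT_START_NUM equals the old (HNAE3_MAX_TC) + (vport_id - 1), so the refactor is behavior-preserving. A small stand-alone check (illustrative only; constants restated locally):

#include <assert.h>
#include <stdio.h>

#define HNAE3_MAX_TC			8
#define HCLGE_VF_VPORT_START_NUM	1

int main(void)
{
	unsigned int id;

	for (id = 0; id < 256; id++) {
		/* old form: two ternaries on vport_id */
		unsigned int old_off = (id ? HNAE3_MAX_TC : 0) + (id ? id - 1 : 0);
		/* new form: explicit PF (id == 0) / VF branch */
		unsigned int new_off = id ?
			HNAE3_MAX_TC + id - HCLGE_VF_VPORT_START_NUM : 0;

		assert(old_off == new_off);
	}
	printf("qs_offset is unchanged for every vport id\n");
	return 0;
}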
......@@ -342,6 +342,15 @@ static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
}
static __le32 hclgevf_build_api_caps(void)
{
u32 api_caps = 0;
hnae3_set_bit(api_caps, HCLGEVF_API_CAP_FLEX_RSS_TBL_B, 1);
return cpu_to_le32(api_caps);
}
static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
......@@ -352,6 +361,7 @@ static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
resp = (struct hclgevf_query_version_cmd *)desc.data;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
resp->api_caps = hclgevf_build_api_caps();
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
return status;
......
......@@ -161,11 +161,15 @@ enum HCLGEVF_CAP_BITS {
HCLGEVF_CAP_UDP_TUNNEL_CSUM_B,
};
enum HCLGEVF_API_CAP_BITS {
HCLGEVF_API_CAP_FLEX_RSS_TBL_B,
};
#define HCLGEVF_QUERY_CAP_LENGTH 3
struct hclgevf_query_version_cmd {
__le32 firmware;
__le32 hardware;
__le32 rsv;
__le32 api_caps;
__le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */
};
......@@ -292,7 +296,8 @@ struct hclgevf_dev_specs_0_cmd {
#define HCLGEVF_DEF_MAX_INT_GL 0x1FE0U
struct hclgevf_dev_specs_1_cmd {
__le32 rsv0;
__le16 max_frm_size;
__le16 rsv0;
__le16 max_int_gl;
u8 rsv1[18];
};
......
......@@ -642,22 +642,20 @@ static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
return HCLGEVF_RSS_KEY_SIZE;
}
static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
return HCLGEVF_RSS_IND_TBL_SIZE;
}
static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
struct hclgevf_rss_indirection_table_cmd *req;
struct hclgevf_desc desc;
int rss_cfg_tbl_num;
int status;
int i, j;
req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
HCLGEVF_RSS_CFG_TBL_SIZE;
for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
for (i = 0; i < rss_cfg_tbl_num; i++) {
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
false);
req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
......@@ -795,7 +793,7 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
}
if (indir)
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
indir[i] = rss_cfg->rss_indirection_tbl[i];
return 0;
......@@ -838,7 +836,7 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
/* update the shadow RSS table with user specified qids */
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] = indir[i];
/* update the hardware */
......@@ -2482,8 +2480,9 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
return ret;
}
static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
{
u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
struct hclgevf_rss_tuple_cfg *tuple_sets;
u32 i;
......@@ -2492,7 +2491,16 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
tuple_sets = &rss_cfg->rss_tuple_sets;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
u8 *rss_ind_tbl;
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
sizeof(*rss_ind_tbl), GFP_KERNEL);
if (!rss_ind_tbl)
return -ENOMEM;
rss_cfg->rss_indirection_tbl = rss_ind_tbl;
memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
HCLGEVF_RSS_KEY_SIZE);
......@@ -2510,8 +2518,10 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
}
/* Initialize RSS indirect table */
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
for (i = 0; i < rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
return 0;
}
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
......@@ -3048,6 +3058,7 @@ static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
......@@ -3066,6 +3077,7 @@ static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}
static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
......@@ -3080,6 +3092,8 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
if (!dev_specs->max_frm_size)
dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
......@@ -3266,7 +3280,12 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
/* Initialize RSS for this VF */
hclgevf_rss_init_cfg(hdev);
ret = hclgevf_rss_init_cfg(hdev);
if (ret) {
dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
goto err_config;
}
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
......@@ -3444,11 +3463,12 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
goto out;
/* Reinitializes the rss indirect table according to the new RSS size */
rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size,
sizeof(u32), GFP_KERNEL);
if (!rss_indir)
return -ENOMEM;
for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_indir[i] = i % kinfo->rss_size;
hdev->rss_cfg.rss_size = kinfo->rss_size;
......@@ -3687,7 +3707,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_strings = hclgevf_get_strings,
.get_sset_count = hclgevf_get_sset_count,
.get_rss_key_size = hclgevf_get_rss_key_size,
.get_rss_indir_size = hclgevf_get_rss_indir_size,
.get_rss = hclgevf_get_rss,
.set_rss = hclgevf_set_rss,
.get_rss_tuple = hclgevf_get_rss_tuple,
......
......@@ -113,8 +113,7 @@
#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1
#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
#define HCLGEVF_RSS_CFG_TBL_NUM \
(HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGEVF_D_PORT_BIT BIT(0)
......@@ -125,6 +124,8 @@
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
(HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
#define HCLGEVF_MAC_MAX_FRAME 9728
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
enum hclgevf_evt_cause {
......@@ -217,7 +218,8 @@ struct hclgevf_rss_cfg {
u32 hash_algo;
u32 rss_size;
u8 hw_tc_map;
u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
/* shadow table */
u8 *rss_indirection_tbl;
struct hclgevf_rss_tuple_cfg rss_tuple_sets;
};
......