Commit feaf751d authored by David S. Miller

Merge branch 'hns3-fixes-for-configuration-lost-problems'

Peng Li says:

====================
fixes for configuration lost problems

This patchset refactors some functions and fixes some bugs in
order to resolve the configuration loss problem seen when the
device is reset and when the channel number is changed.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0623951e 7a242b23
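
The theme running through the series is that user-visible settings used to live only in hardware registers or in per-vector fields that were freed on teardown, so a reset or a channel-count change silently reverted them to defaults. The fix is to keep a software copy of the configuration and re-apply it after the rings and vectors are rebuilt. Below is a minimal standalone sketch of that save/restore pattern; the types and the function are simplified stand-ins for the driver's struct hns3_enet_coalesce and hns3_restore_coal(), not the real code:

#include <string.h>

/* stand-in for struct hns3_enet_coalesce */
struct coal_cfg {
        unsigned short int_gl;          /* interrupt gap (GL) setting */
        unsigned char gl_adapt_enable;  /* self-adaptive GL on/off */
};

/* stand-in for the coalesce part of struct hns3_enet_tqp_vector */
struct tqp_vector {
        struct coal_cfg tx_coal;
        struct coal_cfg rx_coal;
};

/* Replicate one saved coalesce configuration onto every newly
 * allocated vector, mirroring what hns3_restore_coal() does in the
 * diff below: ethtool exposes a single coalesce configuration, so
 * vector 0's saved copy stands in for all vectors after the rebuild.
 */
static void restore_coal(struct tqp_vector *vec, int vector_num,
                         const struct coal_cfg *tx, const struct coal_cfg *rx)
{
        int i;

        for (i = 0; i < vector_num; i++) {
                memcpy(&vec[i].tx_coal, tx, sizeof(*tx));
                memcpy(&vec[i].rx_coal, rx, sizeof(*rx));
        }
}

The same idea appears twice more in the diff: the coalesce fields are grouped into struct hns3_enet_coalesce so they can be copied wholesale, and the RSS key, algorithm and tuple sets are cached in the vport so hclge_rss_init_hw() can re-program them after a reset.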
@@ -265,6 +265,8 @@ struct hnae3_ae_dev {
  * Get tc size of handle
  * get_vector()
  * Get vector number and vector information
+ * put_vector()
+ * Put the vector in hdev
  * map_ring_to_vector()
  * Map rings to vector
  * unmap_ring_from_vector()
@@ -375,6 +377,7 @@ struct hnae3_ae_ops {
         int (*get_vector)(struct hnae3_handle *handle, u16 vector_num,
                           struct hnae3_vector_info *vector_info);
+        int (*put_vector)(struct hnae3_handle *handle, int vector_num);
         int (*map_ring_to_vector)(struct hnae3_handle *handle,
                                   int vector_num,
                                   struct hnae3_ring_chain_node *vr_chain);
......
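
The new put_vector() op makes vector ownership symmetric: the enet layer that obtains MSI-X vectors through get_vector() now also releases them explicitly, instead of unmap_ring_from_vector() freeing them as a side effect (see the hclge and hclgevf hunks near the end of the diff). A rough sketch of that acquire/release contract, using hypothetical simplified types rather than the hnae3 ones:

/* stand-in for struct hnae3_vector_info */
struct vec_info {
        int vector;
};

/* stand-in for the relevant part of struct hnae3_ae_ops */
struct vec_ops {
        /* returns how many of the requested vectors were granted */
        int (*get_vector)(void *hdev, unsigned short wanted,
                          struct vec_info *info);
        /* hands one vector back to its owner */
        int (*put_vector)(void *hdev, int vector);
};

/* Teardown mirrors setup: every vector that get_vector() granted is
 * returned exactly once, which is what lets the channel-number path
 * re-acquire a different number of vectors afterwards.
 */
static void release_vectors(void *hdev, const struct vec_ops *ops,
                            const struct vec_info *info, int granted)
{
        int i;

        for (i = 0; i < granted; i++)
                ops->put_vector(hdev, info[i].vector);
}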
@@ -168,8 +168,8 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
          * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing
          */
-        if (rl_reg > 0 && !tqp_vector->tx_group.gl_adapt_enable &&
-            !tqp_vector->rx_group.gl_adapt_enable)
+        if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
+            !tqp_vector->rx_group.coal.gl_adapt_enable)
                 /* According to the hardware, the range of rl_reg is
                  * 0-59 and the unit is 4.
                  */
@@ -205,23 +205,29 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
          */
         /* Default: enable interrupt coalescing self-adaptive and GL */
-        tqp_vector->tx_group.gl_adapt_enable = 1;
-        tqp_vector->rx_group.gl_adapt_enable = 1;
+        tqp_vector->tx_group.coal.gl_adapt_enable = 1;
+        tqp_vector->rx_group.coal.gl_adapt_enable = 1;

-        tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
-        tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
+        tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
+        tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;

-        hns3_set_vector_coalesce_tx_gl(tqp_vector,
-                                       tqp_vector->tx_group.int_gl);
-        hns3_set_vector_coalesce_rx_gl(tqp_vector,
-                                       tqp_vector->rx_group.int_gl);

         /* Default: disable RL */
         h->kinfo.int_rl_setting = 0;
-        hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);

-        tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
-        tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
+        tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
+        tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
+}
+
+static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
+                                      struct hns3_nic_priv *priv)
+{
+        struct hnae3_handle *h = priv->ae_handle;
+
+        hns3_set_vector_coalesce_tx_gl(tqp_vector,
+                                       tqp_vector->tx_group.coal.int_gl);
+        hns3_set_vector_coalesce_rx_gl(tqp_vector,
+                                       tqp_vector->rx_group.coal.int_gl);
+        hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
 }

 static int hns3_nic_set_real_num_queue(struct net_device *netdev)
@@ -2387,12 +2393,12 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
         u16 new_int_gl;
         int usecs;

-        if (!ring_group->int_gl)
+        if (!ring_group->coal.int_gl)
                 return false;

         if (ring_group->total_packets == 0) {
-                ring_group->int_gl = HNS3_INT_GL_50K;
-                ring_group->flow_level = HNS3_FLOW_LOW;
+                ring_group->coal.int_gl = HNS3_INT_GL_50K;
+                ring_group->coal.flow_level = HNS3_FLOW_LOW;
                 return true;
         }
@@ -2402,10 +2408,10 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
          * 20-1249MB/s high (18000 ints/s)
          * > 40000pps ultra (8000 ints/s)
          */
-        new_flow_level = ring_group->flow_level;
-        new_int_gl = ring_group->int_gl;
+        new_flow_level = ring_group->coal.flow_level;
+        new_int_gl = ring_group->coal.int_gl;
         tqp_vector = ring_group->ring->tqp_vector;
-        usecs = (ring_group->int_gl << 1);
+        usecs = (ring_group->coal.int_gl << 1);
         bytes_per_usecs = ring_group->total_bytes / usecs;
         /* 1000000 microseconds */
         packets_per_secs = ring_group->total_packets * 1000000 / usecs;
@@ -2452,9 +2458,9 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
         ring_group->total_bytes = 0;
         ring_group->total_packets = 0;
-        ring_group->flow_level = new_flow_level;
-        if (new_int_gl != ring_group->int_gl) {
-                ring_group->int_gl = new_int_gl;
+        ring_group->coal.flow_level = new_flow_level;
+        if (new_int_gl != ring_group->coal.int_gl) {
+                ring_group->coal.int_gl = new_int_gl;
                 return true;
         }

         return false;
@@ -2466,18 +2472,18 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
         struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
         bool rx_update, tx_update;

-        if (rx_group->gl_adapt_enable) {
+        if (rx_group->coal.gl_adapt_enable) {
                 rx_update = hns3_get_new_int_gl(rx_group);
                 if (rx_update)
                         hns3_set_vector_coalesce_rx_gl(tqp_vector,
-                                                       rx_group->int_gl);
+                                                       rx_group->coal.int_gl);
         }

-        if (tx_group->gl_adapt_enable) {
+        if (tx_group->coal.gl_adapt_enable) {
                 tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
                 if (tx_update)
                         hns3_set_vector_coalesce_tx_gl(tqp_vector,
-                                                       tx_group->int_gl);
+                                                       tx_group->coal.int_gl);
         }
 }
@@ -2625,32 +2631,18 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
         struct hnae3_ring_chain_node vector_ring_chain;
         struct hnae3_handle *h = priv->ae_handle;
         struct hns3_enet_tqp_vector *tqp_vector;
-        struct hnae3_vector_info *vector;
-        struct pci_dev *pdev = h->pdev;
-        u16 tqp_num = h->kinfo.num_tqps;
-        u16 vector_num;
         int ret = 0;
         u16 i;

-        /* RSS size, cpu online and vector_num should be the same */
-        /* Should consider 2p/4p later */
-        vector_num = min_t(u16, num_online_cpus(), tqp_num);
-        vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
-                              GFP_KERNEL);
-        if (!vector)
-                return -ENOMEM;
-
-        vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
-        priv->vector_num = vector_num;
-        priv->tqp_vector = (struct hns3_enet_tqp_vector *)
-                devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
-                             GFP_KERNEL);
-        if (!priv->tqp_vector)
-                return -ENOMEM;
+        for (i = 0; i < priv->vector_num; i++) {
+                tqp_vector = &priv->tqp_vector[i];
+                hns3_vector_gl_rl_init_hw(tqp_vector, priv);
+                tqp_vector->num_tqps = 0;
+        }

-        for (i = 0; i < tqp_num; i++) {
-                u16 vector_i = i % vector_num;
+        for (i = 0; i < h->kinfo.num_tqps; i++) {
+                u16 vector_i = i % priv->vector_num;
+                u16 tqp_num = h->kinfo.num_tqps;

                 tqp_vector = &priv->tqp_vector[vector_i];
@@ -2660,52 +2652,94 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
                 hns3_add_ring_to_group(&tqp_vector->rx_group,
                                        priv->ring_data[i + tqp_num].ring);

-                tqp_vector->idx = vector_i;
-                tqp_vector->mask_addr = vector[vector_i].io_addr;
-                tqp_vector->vector_irq = vector[vector_i].vector;
-                tqp_vector->num_tqps++;
-
                 priv->ring_data[i].ring->tqp_vector = tqp_vector;
                 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
+                tqp_vector->num_tqps++;
         }

-        for (i = 0; i < vector_num; i++) {
+        for (i = 0; i < priv->vector_num; i++) {
                 tqp_vector = &priv->tqp_vector[i];

                 tqp_vector->rx_group.total_bytes = 0;
                 tqp_vector->rx_group.total_packets = 0;
                 tqp_vector->tx_group.total_bytes = 0;
                 tqp_vector->tx_group.total_packets = 0;
-                hns3_vector_gl_rl_init(tqp_vector, priv);
                 tqp_vector->handle = h;

                 ret = hns3_get_vector_ring_chain(tqp_vector,
                                                  &vector_ring_chain);
                 if (ret)
-                        goto out;
+                        return ret;

                 ret = h->ae_algo->ops->map_ring_to_vector(h,
                         tqp_vector->vector_irq, &vector_ring_chain);
-                if (ret)
-                        goto out;

                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+                if (ret)
+                        return ret;

                 netif_napi_add(priv->netdev, &tqp_vector->napi,
                                hns3_nic_common_poll, NAPI_POLL_WEIGHT);
         }

+        return 0;
+}
+
+static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
+{
+        struct hnae3_handle *h = priv->ae_handle;
+        struct hns3_enet_tqp_vector *tqp_vector;
+        struct hnae3_vector_info *vector;
+        struct pci_dev *pdev = h->pdev;
+        u16 tqp_num = h->kinfo.num_tqps;
+        u16 vector_num;
+        int ret = 0;
+        u16 i;
+
+        /* RSS size, cpu online and vector_num should be the same */
+        /* Should consider 2p/4p later */
+        vector_num = min_t(u16, num_online_cpus(), tqp_num);
+
+        vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
+                              GFP_KERNEL);
+        if (!vector)
+                return -ENOMEM;
+
+        vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
+
+        priv->vector_num = vector_num;
+        priv->tqp_vector = (struct hns3_enet_tqp_vector *)
+                devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
+                             GFP_KERNEL);
+        if (!priv->tqp_vector) {
+                ret = -ENOMEM;
+                goto out;
+        }
+
+        for (i = 0; i < priv->vector_num; i++) {
+                tqp_vector = &priv->tqp_vector[i];
+                tqp_vector->idx = i;
+                tqp_vector->mask_addr = vector[i].io_addr;
+                tqp_vector->vector_irq = vector[i].vector;
+                hns3_vector_gl_rl_init(tqp_vector, priv);
+        }
+
 out:
         devm_kfree(&pdev->dev, vector);
         return ret;
 }

+static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
+{
+        group->ring = NULL;
+        group->count = 0;
+}
+
 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
 {
         struct hnae3_ring_chain_node vector_ring_chain;
         struct hnae3_handle *h = priv->ae_handle;
         struct hns3_enet_tqp_vector *tqp_vector;
-        struct pci_dev *pdev = h->pdev;
         int i, ret;

         for (i = 0; i < priv->vector_num; i++) {
@@ -2721,6 +2755,10 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
                 if (ret)
                         return ret;

+                ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
+                if (ret)
+                        return ret;
+
                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

                 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
@@ -2732,12 +2770,30 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
                 }

                 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
+                hns3_clear_ring_group(&tqp_vector->rx_group);
+                hns3_clear_ring_group(&tqp_vector->tx_group);
                 netif_napi_del(&priv->tqp_vector[i].napi);
         }

-        devm_kfree(&pdev->dev, priv->tqp_vector);
+        return 0;
+}
+
+static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
+{
+        struct hnae3_handle *h = priv->ae_handle;
+        struct pci_dev *pdev = h->pdev;
+        int i, ret;
+
+        for (i = 0; i < priv->vector_num; i++) {
+                struct hns3_enet_tqp_vector *tqp_vector;
+
+                tqp_vector = &priv->tqp_vector[i];
+                ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
+                if (ret)
+                        return ret;
+        }

+        devm_kfree(&pdev->dev, priv->tqp_vector);
         return 0;
 }
@@ -2967,13 +3023,8 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
                 h->ae_algo->ops->reset_queue(h, i);
                 hns3_fini_ring(priv->ring_data[i].ring);
-                devm_kfree(priv->dev, priv->ring_data[i].ring);
                 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
-                devm_kfree(priv->dev,
-                           priv->ring_data[i + h->kinfo.num_tqps].ring);
         }
-        devm_kfree(priv->dev, priv->ring_data);
         return 0;
 }
@@ -3058,6 +3109,12 @@ static int hns3_client_init(struct hnae3_handle *handle)
                 goto out_get_ring_cfg;
         }

+        ret = hns3_nic_alloc_vector_data(priv);
+        if (ret) {
+                ret = -ENOMEM;
+                goto out_alloc_vector_data;
+        }
+
         ret = hns3_nic_init_vector_data(priv);
         if (ret) {
                 ret = -ENOMEM;
@@ -3086,8 +3143,10 @@ static int hns3_client_init(struct hnae3_handle *handle)
 out_reg_netdev_fail:
 out_init_ring_data:
         (void)hns3_nic_uninit_vector_data(priv);
-        priv->ring_data = NULL;
 out_init_vector_data:
+        hns3_nic_dealloc_vector_data(priv);
+out_alloc_vector_data:
+        priv->ring_data = NULL;
 out_get_ring_cfg:
         priv->ae_handle = NULL;
         free_netdev(netdev);
@@ -3107,10 +3166,16 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
         if (ret)
                 netdev_err(netdev, "uninit vector error\n");

+        ret = hns3_nic_dealloc_vector_data(priv);
+        if (ret)
+                netdev_err(netdev, "dealloc vector error\n");
+
         ret = hns3_uninit_all_ring(priv);
         if (ret)
                 netdev_err(netdev, "uninit ring error\n");

+        hns3_put_ring_config(priv);
         priv->ring_data = NULL;

         free_netdev(netdev);
@@ -3316,6 +3381,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
         if (ret)
                 netdev_err(netdev, "uninit ring error\n");

+        hns3_put_ring_config(priv);
+
         priv->ring_data = NULL;

         return ret;
@@ -3346,7 +3413,24 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
         return ret;
 }

-static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
+static void hns3_restore_coal(struct hns3_nic_priv *priv,
+                              struct hns3_enet_coalesce *tx,
+                              struct hns3_enet_coalesce *rx)
+{
+        u16 vector_num = priv->vector_num;
+        int i;
+
+        for (i = 0; i < vector_num; i++) {
+                memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
+                       sizeof(struct hns3_enet_coalesce));
+                memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
+                       sizeof(struct hns3_enet_coalesce));
+        }
+}
+
+static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
+                               struct hns3_enet_coalesce *tx,
+                               struct hns3_enet_coalesce *rx)
 {
         struct hns3_nic_priv *priv = netdev_priv(netdev);
         struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -3360,6 +3444,12 @@ static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
         if (ret)
                 return ret;

+        ret = hns3_nic_alloc_vector_data(priv);
+        if (ret)
+                goto err_alloc_vector;
+
+        hns3_restore_coal(priv, tx, rx);
+
         ret = hns3_nic_init_vector_data(priv);
         if (ret)
                 goto err_uninit_vector;
@@ -3374,6 +3464,8 @@ static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
         hns3_put_ring_config(priv);
 err_uninit_vector:
         hns3_nic_uninit_vector_data(priv);
+err_alloc_vector:
+        hns3_nic_dealloc_vector_data(priv);
         return ret;
 }
@@ -3388,6 +3480,7 @@ int hns3_set_channels(struct net_device *netdev,
         struct hns3_nic_priv *priv = netdev_priv(netdev);
         struct hnae3_handle *h = hns3_get_handle(netdev);
         struct hnae3_knic_private_info *kinfo = &h->kinfo;
+        struct hns3_enet_coalesce tx_coal, rx_coal;
         bool if_running = netif_running(netdev);
         u32 new_tqp_num = ch->combined_count;
         u16 org_tqp_num;
@@ -3421,12 +3514,26 @@ int hns3_set_channels(struct net_device *netdev,
                 goto open_netdev;
         }

+        /* Changing the tqp num may also change the vector num,
+         * ethtool only support setting and querying one coal
+         * configuation for now, so save the vector 0' coal
+         * configuation here in order to restore it.
+         */
+        memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
+               sizeof(struct hns3_enet_coalesce));
+        memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
+               sizeof(struct hns3_enet_coalesce));
+
+        hns3_nic_dealloc_vector_data(priv);
+
         hns3_uninit_all_ring(priv);
+        hns3_put_ring_config(priv);

         org_tqp_num = h->kinfo.num_tqps;
-        ret = hns3_modify_tqp_num(netdev, new_tqp_num);
+        ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal);
         if (ret) {
-                ret = hns3_modify_tqp_num(netdev, org_tqp_num);
+                ret = hns3_modify_tqp_num(netdev, org_tqp_num,
+                                          &tx_coal, &rx_coal);
                 if (ret) {
                         /* If revert to old tqp failed, fatal error occurred */
                         dev_err(&netdev->dev,
......
@@ -460,15 +460,19 @@ enum hns3_link_mode_bits {
 #define HNS3_INT_RL_MAX                 0x00EC
 #define HNS3_INT_RL_ENABLE_MASK         0x40

+struct hns3_enet_coalesce {
+        u16 int_gl;
+        u8 gl_adapt_enable;
+        enum hns3_flow_level_range flow_level;
+};
+
 struct hns3_enet_ring_group {
         /* array of pointers to rings */
         struct hns3_enet_ring *ring;
         u64 total_bytes;        /* total bytes processed this group */
         u64 total_packets;      /* total packets processed this group */
         u16 count;
-        enum hns3_flow_level_range flow_level;
-        u16 int_gl;
-        u8 gl_adapt_enable;
+        struct hns3_enet_coalesce coal;
 };

 struct hns3_enet_tqp_vector {
......
@@ -905,11 +905,13 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
         tx_vector = priv->ring_data[queue].ring->tqp_vector;
         rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;

-        cmd->use_adaptive_tx_coalesce = tx_vector->tx_group.gl_adapt_enable;
-        cmd->use_adaptive_rx_coalesce = rx_vector->rx_group.gl_adapt_enable;
+        cmd->use_adaptive_tx_coalesce =
+                        tx_vector->tx_group.coal.gl_adapt_enable;
+        cmd->use_adaptive_rx_coalesce =
+                        rx_vector->rx_group.coal.gl_adapt_enable;

-        cmd->tx_coalesce_usecs = tx_vector->tx_group.int_gl;
-        cmd->rx_coalesce_usecs = rx_vector->rx_group.int_gl;
+        cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
+        cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;

         cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
         cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
@@ -1029,14 +1031,18 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev,
         tx_vector = priv->ring_data[queue].ring->tqp_vector;
         rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;

-        tx_vector->tx_group.gl_adapt_enable = cmd->use_adaptive_tx_coalesce;
-        rx_vector->rx_group.gl_adapt_enable = cmd->use_adaptive_rx_coalesce;
+        tx_vector->tx_group.coal.gl_adapt_enable =
+                                cmd->use_adaptive_tx_coalesce;
+        rx_vector->rx_group.coal.gl_adapt_enable =
+                                cmd->use_adaptive_rx_coalesce;

-        tx_vector->tx_group.int_gl = cmd->tx_coalesce_usecs;
-        rx_vector->rx_group.int_gl = cmd->rx_coalesce_usecs;
+        tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs;
+        rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs;

-        hns3_set_vector_coalesce_tx_gl(tx_vector, tx_vector->tx_group.int_gl);
-        hns3_set_vector_coalesce_rx_gl(rx_vector, rx_vector->rx_group.int_gl);
+        hns3_set_vector_coalesce_tx_gl(tx_vector,
+                                       tx_vector->tx_group.coal.int_gl);
+        hns3_set_vector_coalesce_rx_gl(rx_vector,
+                                       rx_vector->rx_group.coal.int_gl);

         hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
         hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
......
@@ -144,6 +144,8 @@ static int hclge_map_update(struct hnae3_handle *h)
         if (ret)
                 return ret;

+        hclge_rss_indir_init_cfg(hdev);
+
         return hclge_rss_init_hw(hdev);
 }
......
@@ -2969,6 +2969,24 @@ static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
         return -EINVAL;
 }

+static int hclge_put_vector(struct hnae3_handle *handle, int vector)
+{
+        struct hclge_vport *vport = hclge_get_vport(handle);
+        struct hclge_dev *hdev = vport->back;
+        int vector_id;
+
+        vector_id = hclge_get_vector_index(hdev, vector);
+        if (vector_id < 0) {
+                dev_err(&hdev->pdev->dev,
+                        "Get vector index fail. vector_id =%d\n", vector_id);
+                return vector_id;
+        }
+
+        hclge_free_vector(hdev, vector_id);
+
+        return 0;
+}
+
 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
 {
         return HCLGE_RSS_KEY_SIZE;
@@ -2979,31 +2997,6 @@ static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
         return HCLGE_RSS_IND_TBL_SIZE;
 }

-static int hclge_get_rss_algo(struct hclge_dev *hdev)
-{
-        struct hclge_rss_config_cmd *req;
-        struct hclge_desc desc;
-        int rss_hash_algo;
-        int ret;
-
-        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true);
-
-        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-        if (ret) {
-                dev_err(&hdev->pdev->dev,
-                        "Get link status error, status =%d\n", ret);
-                return ret;
-        }
-
-        req = (struct hclge_rss_config_cmd *)desc.data;
-        rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);
-
-        if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
-                return ETH_RSS_HASH_TOP;
-
-        return -EINVAL;
-}
-
 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
                                   const u8 hfunc, const u8 *key)
 {
@@ -3042,7 +3035,7 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
         return 0;
 }

-static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
+static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
 {
         struct hclge_rss_indirection_table_cmd *req;
         struct hclge_desc desc;
@@ -3116,14 +3109,16 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
-        req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
-        req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
-        req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
-        req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
-        req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
-        req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
-        req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
-        req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
+
+        /* Get the tuple cfg from pf */
+        req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
+        req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
+        req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
+        req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
+        req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
+        req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
+        req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
+        req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
+
         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
         if (ret) {
                 dev_err(&hdev->pdev->dev,
@@ -3138,12 +3133,11 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
                          u8 *key, u8 *hfunc)
 {
         struct hclge_vport *vport = hclge_get_vport(handle);
-        struct hclge_dev *hdev = vport->back;
         int i;

         /* Get hash algorithm */
         if (hfunc)
-                *hfunc = hclge_get_rss_algo(hdev);
+                *hfunc = vport->rss_algo;

         /* Get the RSS Key required by the user */
         if (key)
@@ -3167,8 +3161,6 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
         /* Set the RSS Hash Key if specififed by the user */
         if (key) {
-                /* Update the shadow RSS key with user specified qids */
-                memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);

                 if (hfunc == ETH_RSS_HASH_TOP ||
                     hfunc == ETH_RSS_HASH_NO_CHANGE)
@@ -3178,6 +3170,10 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
                 if (ret)
                         return ret;
+
+                /* Update the shadow RSS key with user specified qids */
+                memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
+                vport->rss_algo = hash_algo;
         }

         /* Update the shadow RSS table with user specified qids */
@@ -3185,8 +3181,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
                 vport->rss_indirection_tbl[i] = indir[i];

         /* Update the hardware */
-        ret = hclge_set_rss_indir_table(hdev, indir);
-        return ret;
+        return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
 }

 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
@@ -3229,15 +3224,16 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
                 return -EINVAL;

         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
-        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
-        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-        if (ret) {
-                dev_err(&hdev->pdev->dev,
-                        "Read rss tuple fail, status = %d\n", ret);
-                return ret;
-        }
+        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);

-        hclge_cmd_reuse_desc(&desc, false);
+        req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
+        req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
+        req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
+        req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
+        req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
+        req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
+        req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
+        req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;

         tuple_sets = hclge_get_rss_hash_bits(nfc);
         switch (nfc->flow_type) {
@@ -3274,52 +3270,49 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
         }

         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-        if (ret)
+        if (ret) {
                 dev_err(&hdev->pdev->dev,
                         "Set rss tuple fail, status = %d\n", ret);
+                return ret;
+        }

-        return ret;
+        vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
+        vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
+        vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
+        vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
+        vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
+        vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
+        vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
+        vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+        return 0;
 }

 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
                                struct ethtool_rxnfc *nfc)
 {
         struct hclge_vport *vport = hclge_get_vport(handle);
-        struct hclge_dev *hdev = vport->back;
-        struct hclge_rss_input_tuple_cmd *req;
-        struct hclge_desc desc;
         u8 tuple_sets;
-        int ret;

         nfc->data = 0;

-        req = (struct hclge_rss_input_tuple_cmd *)desc.data;
-        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
-        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
-        if (ret) {
-                dev_err(&hdev->pdev->dev,
-                        "Read rss tuple fail, status = %d\n", ret);
-                return ret;
-        }
-
         switch (nfc->flow_type) {
         case TCP_V4_FLOW:
-                tuple_sets = req->ipv4_tcp_en;
+                tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
                 break;
         case UDP_V4_FLOW:
-                tuple_sets = req->ipv4_udp_en;
+                tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
                 break;
         case TCP_V6_FLOW:
-                tuple_sets = req->ipv6_tcp_en;
+                tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
                 break;
         case UDP_V6_FLOW:
-                tuple_sets = req->ipv6_udp_en;
+                tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
                 break;
         case SCTP_V4_FLOW:
-                tuple_sets = req->ipv4_sctp_en;
+                tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
                 break;
         case SCTP_V6_FLOW:
-                tuple_sets = req->ipv6_sctp_en;
+                tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
                 break;
         case IPV4_FLOW:
         case IPV6_FLOW:
@@ -3354,50 +3347,28 @@ static int hclge_get_tc_size(struct hnae3_handle *handle)

 int hclge_rss_init_hw(struct hclge_dev *hdev)
 {
-        const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
         struct hclge_vport *vport = hdev->vport;
+        u8 *rss_indir = vport[0].rss_indirection_tbl;
+        u16 rss_size = vport[0].alloc_rss_size;
+        u8 *key = vport[0].rss_hash_key;
+        u8 hfunc = vport[0].rss_algo;
         u16 tc_offset[HCLGE_MAX_TC_NUM];
-        u8 rss_key[HCLGE_RSS_KEY_SIZE];
         u16 tc_valid[HCLGE_MAX_TC_NUM];
         u16 tc_size[HCLGE_MAX_TC_NUM];
-        u32 *rss_indir = NULL;
-        u16 rss_size = 0, roundup_size;
-        const u8 *key;
-        int i, ret, j;
-
-        rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
-        if (!rss_indir)
-                return -ENOMEM;
-
-        /* Get default RSS key */
-        netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE);
-
-        /* Initialize RSS indirect table for each vport */
-        for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
-                for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
-                        vport[j].rss_indirection_tbl[i] =
-                                i % vport[j].alloc_rss_size;
-
-                        /* vport 0 is for PF */
-                        if (j != 0)
-                                continue;
-
-                        rss_size = vport[j].alloc_rss_size;
-                        rss_indir[i] = vport[j].rss_indirection_tbl[i];
-                }
-        }
+        u16 roundup_size;
+        int i, ret;
+
         ret = hclge_set_rss_indir_table(hdev, rss_indir);
         if (ret)
-                goto err;
+                return ret;

-        key = rss_key;
         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
         if (ret)
-                goto err;
+                return ret;

         ret = hclge_set_rss_input_tuple(hdev);
         if (ret)
-                goto err;
+                return ret;

         /* Each TC have the same queue size, and tc_size set to hardware is
          * the log2 of roundup power of two of rss_size, the acutal queue
@@ -3407,8 +3378,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
                 dev_err(&hdev->pdev->dev,
                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
                         rss_size);
-                ret = -EINVAL;
-                goto err;
+                return -EINVAL;
         }

         roundup_size = roundup_pow_of_two(rss_size);
@@ -3425,12 +3395,50 @@ int hclge_rss_init_hw(struct hclge_dev *hdev)
                 tc_offset[i] = rss_size * i;
         }

-        ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+        return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+}

-err:
-        kfree(rss_indir);
+void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
+{
+        struct hclge_vport *vport = hdev->vport;
+        int i, j;

-        return ret;
+        for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
+                for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+                        vport[j].rss_indirection_tbl[i] =
+                                i % vport[j].alloc_rss_size;
+        }
+}
+
+static void hclge_rss_init_cfg(struct hclge_dev *hdev)
+{
+        struct hclge_vport *vport = hdev->vport;
+        int i;
+
+        netdev_rss_key_fill(vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
+
+        for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+                vport[i].rss_tuple_sets.ipv4_tcp_en =
+                        HCLGE_RSS_INPUT_TUPLE_OTHER;
+                vport[i].rss_tuple_sets.ipv4_udp_en =
+                        HCLGE_RSS_INPUT_TUPLE_OTHER;
+                vport[i].rss_tuple_sets.ipv4_sctp_en =
+                        HCLGE_RSS_INPUT_TUPLE_SCTP;
+                vport[i].rss_tuple_sets.ipv4_fragment_en =
+                        HCLGE_RSS_INPUT_TUPLE_OTHER;
+                vport[i].rss_tuple_sets.ipv6_tcp_en =
+                        HCLGE_RSS_INPUT_TUPLE_OTHER;
+                vport[i].rss_tuple_sets.ipv6_udp_en =
+                        HCLGE_RSS_INPUT_TUPLE_OTHER;
+                vport[i].rss_tuple_sets.ipv6_sctp_en =
+                        HCLGE_RSS_INPUT_TUPLE_SCTP;
+                vport[i].rss_tuple_sets.ipv6_fragment_en =
+                        HCLGE_RSS_INPUT_TUPLE_OTHER;
+
+                vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
+        }
+
+        hclge_rss_indir_init_cfg(hdev);
 }

 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
@@ -3533,18 +3541,13 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
         }

         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
-        if (ret) {
+        if (ret)
                 dev_err(&handle->pdev->dev,
                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
                         vector_id,
                         ret);
-                return ret;
-        }

-        /* Free this MSIX or MSI vector */
-        hclge_free_vector(hdev, vector_id);
-
-        return 0;
+        return ret;
 }

 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
@@ -5398,6 +5401,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
                 return ret;
         }

+        hclge_rss_init_cfg(hdev);
         ret = hclge_rss_init_hw(hdev);
         if (ret) {
                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
@@ -5502,9 +5506,9 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
                 return ret;
         }

-        ret = hclge_tm_schd_init(hdev);
+        ret = hclge_tm_init_hw(hdev);
         if (ret) {
-                dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
+                dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
                 return ret;
         }
@@ -6003,6 +6007,7 @@ static const struct hnae3_ae_ops hclge_ops = {
         .map_ring_to_vector = hclge_map_ring_to_vector,
         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
         .get_vector = hclge_get_vector,
+        .put_vector = hclge_put_vector,
         .set_promisc_mode = hclge_set_promisc_mode,
         .set_loopback = hclge_set_loopback,
         .start = hclge_ae_start,
......
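
The RSS rework above applies the same store-then-apply discipline: hclge_rss_init_cfg() generates defaults into the vport once at probe time, and hclge_rss_init_hw() only pushes that cached state to hardware, so resets and DCB map updates can re-run it without losing user settings. A compressed sketch of the split, with simplified stand-in state rather than the hclge structures:

/* stand-in for the per-vport cached RSS fields */
struct rss_state {
        unsigned char indir[512];       /* indirection table */
        int algo;                       /* hash algorithm id */
};

/* probe time only: choose defaults, touch no hardware */
static void rss_init_cfg(struct rss_state *st, int rss_size)
{
        int i;

        for (i = 0; i < (int)sizeof(st->indir); i++)
                st->indir[i] = (unsigned char)(i % rss_size);
        st->algo = 0;                   /* e.g. Toeplitz */
}

/* probe and every reset: reads only the cached state, so re-running
 * it is idempotent and never reverts what the user configured
 */
static int rss_apply_hw(const struct rss_state *st)
{
        /* write st->indir and st->algo to the device here */
        return st->algo >= 0 ? 0 : -1;
}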
@@ -573,12 +573,27 @@ struct hclge_rx_vtag_cfg {
         bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
 };

+struct hclge_rss_tuple_cfg {
+        u8 ipv4_tcp_en;
+        u8 ipv4_udp_en;
+        u8 ipv4_sctp_en;
+        u8 ipv4_fragment_en;
+        u8 ipv6_tcp_en;
+        u8 ipv6_udp_en;
+        u8 ipv6_sctp_en;
+        u8 ipv6_fragment_en;
+};
+
 struct hclge_vport {
         u16 alloc_tqps; /* Allocated Tx/Rx queues */

         u8  rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
         /* User configured lookup table entries */
         u8  rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
+        int rss_algo;           /* User configured hash algorithm */
+        /* User configured rss tuple sets */
+        struct hclge_rss_tuple_cfg rss_tuple_sets;
+
         u16 alloc_rss_size;

         u16 qs_offset;
@@ -627,6 +642,7 @@ int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid,

 int hclge_buffer_alloc(struct hclge_dev *hdev);
 int hclge_rss_init_hw(struct hclge_dev *hdev);
+void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);

 void hclge_mbx_handler(struct hclge_dev *hdev);
 void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
......
@@ -627,13 +627,18 @@ static int hclgevf_unmap_ring_from_vector(
         }

         ret = hclgevf_bind_ring_to_vector(handle, false, vector, ring_chain);
-        if (ret) {
+        if (ret)
                 dev_err(&handle->pdev->dev,
                         "Unmap ring from vector fail. vector=%d, ret =%d\n",
                         vector_id,
                         ret);
-                return ret;
-        }
+
+        return ret;
+}
+
+static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
+{
+        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

         hclgevf_free_vector(hdev, vector);
@@ -1466,6 +1471,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
         .map_ring_to_vector = hclgevf_map_ring_to_vector,
         .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
         .get_vector = hclgevf_get_vector,
+        .put_vector = hclgevf_put_vector,
         .reset_queue = hclgevf_reset_tqp,
         .set_promisc_mode = hclgevf_set_promisc_mode,
         .get_mac_addr = hclgevf_get_mac_addr,
......