Commit f04d402f authored by David S. Miller

Merge branch 'hns3-fixes'

Huazhong Tan says:

====================
net: hns3: code optimizations & bugfixes for HNS3 driver

This patchset includes bugfixes and code optimizations for the HNS3
ethernet controller driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 51a5365c 1f609492
@@ -124,6 +124,7 @@ enum hnae3_reset_notify_type {
         HNAE3_DOWN_CLIENT,
         HNAE3_INIT_CLIENT,
         HNAE3_UNINIT_CLIENT,
+        HNAE3_RESTORE_CLIENT,
 };

 enum hnae3_reset_type {
@@ -500,6 +501,7 @@ struct hnae3_tc_info {
 struct hnae3_knic_private_info {
         struct net_device *netdev; /* Set by KNIC client when init instance */
         u16 rss_size;              /* Allocated RSS queues */
+        u16 req_rss_size;
         u16 rx_buf_len;
         u16 num_desc;
...
@@ -3185,6 +3185,9 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
         for (i = 0; i < priv->vector_num; i++) {
                 tqp_vector = &priv->tqp_vector[i];

+                if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
+                        continue;
+
                 ret = hns3_get_vector_ring_chain(tqp_vector,
                                                  &vector_ring_chain);
                 if (ret)
@@ -3205,7 +3208,6 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
                         tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
                 }

-                priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
                 hns3_clear_ring_group(&tqp_vector->rx_group);
                 hns3_clear_ring_group(&tqp_vector->tx_group);
                 netif_napi_del(&priv->tqp_vector[i].napi);
@@ -3238,6 +3240,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
 {
         struct hns3_nic_ring_data *ring_data = priv->ring_data;
         int queue_num = priv->ae_handle->kinfo.num_tqps;
+        int desc_num = priv->ae_handle->kinfo.num_desc;
         struct pci_dev *pdev = priv->ae_handle->pdev;
         struct hns3_enet_ring *ring;
@@ -3263,7 +3266,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
         ring->dev = priv->dev;
         ring->desc_dma_addr = 0;
         ring->buf_size = q->buf_size;
-        ring->desc_num = q->desc_num;
+        ring->desc_num = desc_num;
         ring->next_to_use = 0;
         ring->next_to_clean = 0;
@@ -3725,7 +3728,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
 {
         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
         struct net_device *ndev = kinfo->netdev;
-        bool if_running;
         int ret;

         if (tc > HNAE3_MAX_TC)
@@ -3734,24 +3736,13 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
         if (!ndev)
                 return -ENODEV;

-        if_running = netif_running(ndev);
-
-        if (if_running) {
-                (void)hns3_nic_net_stop(ndev);
-                msleep(100);
-        }
-
         ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
                 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
         if (ret)
-                goto err_out;
+                return ret;

         ret = hns3_nic_set_real_num_queue(ndev);

-err_out:
-        if (if_running)
-                (void)hns3_nic_net_open(ndev);
-
         return ret;
 }
@@ -4013,41 +4004,18 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
 {
         struct net_device *netdev = handle->kinfo.netdev;
         struct hns3_nic_priv *priv = netdev_priv(netdev);
-        bool vlan_filter_enable;
         int ret;

-        ret = hns3_init_mac_addr(netdev, false);
-        if (ret)
-                return ret;
-
-        ret = hns3_recover_hw_addr(netdev);
-        if (ret)
-                return ret;
-
-        ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
-        if (ret)
-                return ret;
-
-        vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
-        hns3_enable_vlan_filter(netdev, vlan_filter_enable);
-
-        /* Hardware table is only clear when pf resets */
-        if (!(handle->flags & HNAE3_SUPPORT_VF)) {
-                ret = hns3_restore_vlan(netdev);
-                if (ret)
-                        return ret;
-        }
+        /* Carrier off reporting is important to ethtool even BEFORE open */
+        netif_carrier_off(netdev);

-        ret = hns3_restore_fd_rules(netdev);
+        ret = hns3_get_ring_config(priv);
         if (ret)
                 return ret;

-        /* Carrier off reporting is important to ethtool even BEFORE open */
-        netif_carrier_off(netdev);
-
         ret = hns3_nic_alloc_vector_data(priv);
         if (ret)
-                return ret;
+                goto err_put_ring;

         hns3_restore_coal(priv);
@@ -4068,10 +4036,44 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
         priv->ring_data = NULL;
 err_dealloc_vector:
         hns3_nic_dealloc_vector_data(priv);
+err_put_ring:
+        hns3_put_ring_config(priv);
+        priv->ring_data = NULL;

         return ret;
 }

+static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
+{
+        struct net_device *netdev = handle->kinfo.netdev;
+        bool vlan_filter_enable;
+        int ret;
+
+        ret = hns3_init_mac_addr(netdev, false);
+        if (ret)
+                return ret;
+
+        ret = hns3_recover_hw_addr(netdev);
+        if (ret)
+                return ret;
+
+        ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
+        if (ret)
+                return ret;
+
+        vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
+        hns3_enable_vlan_filter(netdev, vlan_filter_enable);
+
+        /* Hardware table is only clear when pf resets */
+        if (!(handle->flags & HNAE3_SUPPORT_VF)) {
+                ret = hns3_restore_vlan(netdev);
+                if (ret)
+                        return ret;
+        }
+
+        return hns3_restore_fd_rules(netdev);
+}
+
 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
 {
         struct net_device *netdev = handle->kinfo.netdev;
@@ -4101,6 +4103,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
         if (ret)
                 netdev_err(netdev, "uninit ring error\n");

+        hns3_put_ring_config(priv);
+        priv->ring_data = NULL;
+
         clear_bit(HNS3_NIC_STATE_INITED, &priv->state);

         return ret;
@@ -4124,6 +4129,9 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
         case HNAE3_UNINIT_CLIENT:
                 ret = hns3_reset_notify_uninit_enet(handle);
                 break;
+        case HNAE3_RESTORE_CLIENT:
+                ret = hns3_reset_notify_restore_enet(handle);
+                break;
         default:
                 break;
         }
@@ -4131,57 +4139,11 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
         return ret;
 }

-static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
-{
-        struct hns3_nic_priv *priv = netdev_priv(netdev);
-        struct hnae3_handle *h = hns3_get_handle(netdev);
-        int ret;
-
-        ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
-        if (ret)
-                return ret;
-
-        ret = hns3_get_ring_config(priv);
-        if (ret)
-                return ret;
-
-        ret = hns3_nic_alloc_vector_data(priv);
-        if (ret)
-                goto err_alloc_vector;
-
-        hns3_restore_coal(priv);
-
-        ret = hns3_nic_init_vector_data(priv);
-        if (ret)
-                goto err_uninit_vector;
-
-        ret = hns3_init_all_ring(priv);
-        if (ret)
-                goto err_put_ring;
-
-        return 0;
-
-err_put_ring:
-        hns3_put_ring_config(priv);
-err_uninit_vector:
-        hns3_nic_uninit_vector_data(priv);
-err_alloc_vector:
-        hns3_nic_dealloc_vector_data(priv);
-        return ret;
-}
-
-static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
-{
-        return (new_tqp_num / num_tc) * num_tc;
-}
-
 int hns3_set_channels(struct net_device *netdev,
                       struct ethtool_channels *ch)
 {
-        struct hns3_nic_priv *priv = netdev_priv(netdev);
         struct hnae3_handle *h = hns3_get_handle(netdev);
         struct hnae3_knic_private_info *kinfo = &h->kinfo;
-        bool if_running = netif_running(netdev);
         u32 new_tqp_num = ch->combined_count;
         u16 org_tqp_num;
         int ret;
@@ -4190,39 +4152,28 @@ int hns3_set_channels(struct net_device *netdev,
                 return -EINVAL;

         if (new_tqp_num > hns3_get_max_available_channels(h) ||
-            new_tqp_num < kinfo->num_tc) {
+            new_tqp_num < 1) {
                 dev_err(&netdev->dev,
-                        "Change tqps fail, the tqp range is from %d to %d",
-                        kinfo->num_tc,
+                        "Change tqps fail, the tqp range is from 1 to %d",
                         hns3_get_max_available_channels(h));
                 return -EINVAL;
         }

-        new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
-        if (kinfo->num_tqps == new_tqp_num)
+        if (kinfo->rss_size == new_tqp_num)
                 return 0;

-        if (if_running)
-                hns3_nic_net_stop(netdev);
-
-        ret = hns3_nic_uninit_vector_data(priv);
-        if (ret) {
-                dev_err(&netdev->dev,
-                        "Unbind vector with tqp fail, nothing is changed");
-                goto open_netdev;
-        }
-
-        hns3_store_coal(priv);
-
-        hns3_nic_dealloc_vector_data(priv);
-
-        hns3_uninit_all_ring(priv);
-        hns3_put_ring_config(priv);
+        ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
+        if (ret)
+                return ret;
+
+        ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
+        if (ret)
+                return ret;

         org_tqp_num = h->kinfo.num_tqps;
-        ret = hns3_modify_tqp_num(netdev, new_tqp_num);
+        ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
         if (ret) {
-                ret = hns3_modify_tqp_num(netdev, org_tqp_num);
+                ret = h->ae_algo->ops->set_channels(h, org_tqp_num);
                 if (ret) {
                         /* If revert to old tqp failed, fatal error occurred */
                         dev_err(&netdev->dev,
@@ -4232,12 +4183,11 @@ int hns3_set_channels(struct net_device *netdev,
                 dev_info(&netdev->dev,
                          "Change tqp num fail, Revert to old tqp num");
         }

-open_netdev:
-        if (if_running)
-                hns3_nic_net_open(netdev);
+        ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
+        if (ret)
+                return ret;

-        return ret;
+        return hns3_reset_notify(h, HNAE3_UP_CLIENT);
 }

 static const struct hnae3_client_ops client_ops = {
...
@@ -412,7 +412,6 @@ struct hns3_enet_ring {
         unsigned char *va; /* first buffer address for current packet */

         u32 flag;          /* ring attribute */
-        int irq_init_flag;

         int numa_node;
         cpumask_t affinity_mask;
...
@@ -222,6 +222,16 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
         if (ret)
                 return ret;

+        if (map_changed) {
+                ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+                if (ret)
+                        return ret;
+
+                ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+                if (ret)
+                        return ret;
+        }
+
         hclge_tm_schd_info_update(hdev, num_tc);

         ret = hclge_ieee_ets_to_tm_info(hdev, ets);
@@ -232,6 +242,13 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
                 ret = hclge_client_setup_tc(hdev);
                 if (ret)
                         return ret;
+
+                ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+                if (ret)
+                        return ret;
+
+                ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+                if (ret)
+                        return ret;
         }

         return hclge_tm_dwrr_cfg(hdev);
...
@@ -1068,14 +1068,14 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
         return ret;
 }

-static int hclge_assign_tqp(struct hclge_vport *vport)
+static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
 {
         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
         struct hclge_dev *hdev = vport->back;
         int i, alloced;

         for (i = 0, alloced = 0; i < hdev->num_tqps &&
-             alloced < kinfo->num_tqps; i++) {
+             alloced < num_tqps; i++) {
                 if (!hdev->htqp[i].alloced) {
                         hdev->htqp[i].q.handle = &vport->nic;
                         hdev->htqp[i].q.tqp_index = alloced;
@@ -1085,7 +1085,9 @@ static int hclge_assign_tqp(struct hclge_vport *vport)
                         alloced++;
                 }
         }
-        vport->alloc_tqps = kinfo->num_tqps;
+        vport->alloc_tqps = alloced;
+        kinfo->rss_size = min_t(u16, hdev->rss_size_max,
+                                vport->alloc_tqps / hdev->tm_info.num_tc);

         return 0;
 }
@@ -1096,36 +1098,17 @@ static int hclge_knic_setup(struct hclge_vport *vport,
         struct hnae3_handle *nic = &vport->nic;
         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
         struct hclge_dev *hdev = vport->back;
-        int i, ret;
+        int ret;

         kinfo->num_desc = num_desc;
         kinfo->rx_buf_len = hdev->rx_buf_len;
-        kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc);
-        kinfo->rss_size
-                = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc);
-        kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc;

-        for (i = 0; i < HNAE3_MAX_TC; i++) {
-                if (hdev->hw_tc_map & BIT(i)) {
-                        kinfo->tc_info[i].enable = true;
-                        kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
-                        kinfo->tc_info[i].tqp_count = kinfo->rss_size;
-                        kinfo->tc_info[i].tc = i;
-                } else {
-                        /* Set to default queue if TC is disable */
-                        kinfo->tc_info[i].enable = false;
-                        kinfo->tc_info[i].tqp_offset = 0;
-                        kinfo->tc_info[i].tqp_count = 1;
-                        kinfo->tc_info[i].tc = 0;
-                }
-        }
-
-        kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
+        kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
         if (!kinfo->tqp)
                 return -ENOMEM;

-        ret = hclge_assign_tqp(vport);
+        ret = hclge_assign_tqp(vport, num_tqps);
         if (ret)
                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
@@ -1140,7 +1123,7 @@ static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
         u16 i;

         kinfo = &nic->kinfo;
-        for (i = 0; i < kinfo->num_tqps; i++) {
+        for (i = 0; i < vport->alloc_tqps; i++) {
                 struct hclge_tqp *q =
                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
                 bool is_pf;
@@ -2418,8 +2401,8 @@ static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
         hclge_free_vector(hdev, 0);
 }

-static int hclge_notify_client(struct hclge_dev *hdev,
-                               enum hnae3_reset_notify_type type)
+int hclge_notify_client(struct hclge_dev *hdev,
+                        enum hnae3_reset_notify_type type)
 {
         struct hnae3_client *client = hdev->nic_client;
         u16 i;
@@ -2883,6 +2866,10 @@ static void hclge_reset(struct hclge_dev *hdev)
         if (ret)
                 goto err_reset_lock;

+        ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+        if (ret)
+                goto err_reset_lock;
+
         hclge_clear_reset_cause(hdev);

         ret = hclge_reset_prepare_up(hdev);
@@ -5258,6 +5245,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
                               enum hnae3_loop loop_mode, bool en)
 {
         struct hclge_vport *vport = hclge_get_vport(handle);
+        struct hnae3_knic_private_info *kinfo;
         struct hclge_dev *hdev = vport->back;
         int i, ret;
@@ -5276,7 +5264,8 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
                 break;
         }

-        for (i = 0; i < vport->alloc_tqps; i++) {
+        kinfo = &vport->nic.kinfo;
+        for (i = 0; i < kinfo->num_tqps; i++) {
                 ret = hclge_tqp_enable(hdev, i, 0, en);
                 if (ret)
                         return ret;
@@ -5288,11 +5277,13 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
 {
         struct hclge_vport *vport = hclge_get_vport(handle);
+        struct hnae3_knic_private_info *kinfo;
         struct hnae3_queue *queue;
         struct hclge_tqp *tqp;
         int i;

-        for (i = 0; i < vport->alloc_tqps; i++) {
+        kinfo = &vport->nic.kinfo;
+        for (i = 0; i < kinfo->num_tqps; i++) {
                 queue = handle->kinfo.tqp[i];
                 tqp = container_of(queue, struct hclge_tqp, q);
                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
@@ -7523,18 +7514,17 @@ static u32 hclge_get_max_channels(struct hnae3_handle *handle)
         struct hclge_vport *vport = hclge_get_vport(handle);
         struct hclge_dev *hdev = vport->back;

-        return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
+        return min_t(u32, hdev->rss_size_max,
+                     vport->alloc_tqps / kinfo->num_tc);
 }

 static void hclge_get_channels(struct hnae3_handle *handle,
                                struct ethtool_channels *ch)
 {
-        struct hclge_vport *vport = hclge_get_vport(handle);
-
         ch->max_combined = hclge_get_max_channels(handle);
         ch->other_count = 1;
         ch->max_other = 1;
-        ch->combined_count = vport->alloc_tqps;
+        ch->combined_count = handle->kinfo.rss_size;
 }

 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
@@ -7547,25 +7537,6 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
         *max_rss_size = hdev->rss_size_max;
 }

-static void hclge_release_tqp(struct hclge_vport *vport)
-{
-        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
-        struct hclge_dev *hdev = vport->back;
-        int i;
-
-        for (i = 0; i < kinfo->num_tqps; i++) {
-                struct hclge_tqp *tqp =
-                        container_of(kinfo->tqp[i], struct hclge_tqp, q);
-
-                tqp->q.handle = NULL;
-                tqp->q.tqp_index = 0;
-                tqp->alloced = false;
-        }
-
-        devm_kfree(&hdev->pdev->dev, kinfo->tqp);
-        kinfo->tqp = NULL;
-}
-
 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
 {
         struct hclge_vport *vport = hclge_get_vport(handle);
@@ -7580,24 +7551,11 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num)
         u32 *rss_indir;
         int ret, i;

-        /* Free old tqps, and reallocate with new tqp number when nic setup */
-        hclge_release_tqp(vport);
-
-        ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc);
-        if (ret) {
-                dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret);
-                return ret;
-        }
-
-        ret = hclge_map_tqp_to_vport(hdev, vport);
-        if (ret) {
-                dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret);
-                return ret;
-        }
+        kinfo->req_rss_size = new_tqps_num;

-        ret = hclge_tm_schd_init(hdev);
+        ret = hclge_tm_vport_map_update(hdev);
         if (ret) {
-                dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret);
+                dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
                 return ret;
         }
...
@@ -878,4 +878,6 @@ void hclge_vport_stop(struct hclge_vport *vport);
 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
 int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
+int hclge_notify_client(struct hclge_dev *hdev,
+                        enum hnae3_reset_notify_type type);
 #endif
@@ -517,19 +517,32 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
 {
         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
         struct hclge_dev *hdev = vport->back;
+        u16 max_rss_size;
         u8 i;

         vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
-        kinfo->num_tc =
-                min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
-        kinfo->rss_size
-                = min_t(u16, hdev->rss_size_max,
-                        kinfo->num_tqps / kinfo->num_tc);
+        kinfo->num_tc = min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
+        max_rss_size = min_t(u16, hdev->rss_size_max,
+                             vport->alloc_tqps / kinfo->num_tc);
+
+        if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
+            kinfo->req_rss_size <= max_rss_size) {
+                dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
+                         kinfo->rss_size, kinfo->req_rss_size);
+                kinfo->rss_size = kinfo->req_rss_size;
+        } else if (kinfo->rss_size > max_rss_size ||
+                   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
+                dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
+                         kinfo->rss_size, max_rss_size);
+                kinfo->rss_size = max_rss_size;
+        }
+
+        kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
         vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
         vport->dwrr = 100;  /* 100 percent as init */
         vport->alloc_rss_size = kinfo->rss_size;

-        for (i = 0; i < kinfo->num_tc; i++) {
+        for (i = 0; i < HNAE3_MAX_TC; i++) {
                 if (hdev->hw_tc_map & BIT(i)) {
                         kinfo->tc_info[i].enable = true;
                         kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
@@ -1228,10 +1241,23 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
         return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
 }

+static int hclge_tm_bp_setup(struct hclge_dev *hdev)
+{
+        int ret = 0;
+        int i;
+
+        for (i = 0; i < hdev->tm_info.num_tc; i++) {
+                ret = hclge_bp_setup_hw(hdev, i);
+                if (ret)
+                        return ret;
+        }
+
+        return ret;
+}
+
 int hclge_pause_setup_hw(struct hclge_dev *hdev)
 {
         int ret;
-        u8 i;

         ret = hclge_pause_param_setup_hw(hdev);
         if (ret)
@@ -1250,13 +1276,7 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
         if (ret)
                 dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);

-        for (i = 0; i < hdev->tm_info.num_tc; i++) {
-                ret = hclge_bp_setup_hw(hdev, i);
-                if (ret)
-                        return ret;
-        }
-
-        return 0;
+        return hclge_tm_bp_setup(hdev);
 }

 void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
@@ -1327,3 +1347,20 @@ int hclge_tm_schd_init(struct hclge_dev *hdev)

         return hclge_tm_init_hw(hdev);
 }
+
+int hclge_tm_vport_map_update(struct hclge_dev *hdev)
+{
+        struct hclge_vport *vport = hdev->vport;
+        int ret;
+
+        hclge_tm_vport_tc_info_update(vport);
+
+        ret = hclge_vport_q_to_qs_map(hdev, vport);
+        if (ret)
+                return ret;
+
+        if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
+                return 0;
+
+        return hclge_tm_bp_setup(hdev);
+}
@@ -142,6 +142,7 @@ struct hclge_port_shapping_cmd {
                  (HCLGE_TM_SHAP_##string##_LSH))

 int hclge_tm_schd_init(struct hclge_dev *hdev);
+int hclge_tm_vport_map_update(struct hclge_dev *hdev);
 int hclge_pause_setup_hw(struct hclge_dev *hdev);
 int hclge_tm_schd_mode_hw(struct hclge_dev *hdev);
 void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
...
@@ -1264,7 +1264,7 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
         if (ret)
                 return ret;

-        return 0;
+        return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
 }

 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
...