Commit 8b5d10e4 authored by David S. Miller

Merge branch 'net-hns3-refactor-for-MAC-table'

Huazhong Tan says:

====================
net: hns3: refactor for MAC table

This patchset refactors the MAC table management so that MAC addresses
are configured asynchronously instead of synchronously. Based on this
change, it also refines the handling of promisc mode and the restoring
of filter table entries after reset.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d4833896 039ba863
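For readers skimming the hunks below, the heart of the refactor is a small per-address bookkeeping list: instead of writing MAC addresses to hardware from the set_rx_mode path, each vport records every requested address in a list node whose state is TO_ADD, TO_DEL or ACTIVE, and a periodic service task later pushes the pending entries to hardware. The following stand-alone sketch illustrates only that idea; the names here (mac_node, request_add, sync_mac_list) are simplified stand-ins, not the real hclge symbols, and the sketch is user-space C under those assumptions rather than driver code.

/* Minimal user-space sketch of the asynchronous MAC table idea.
 * Names (mac_node, request_add, sync_mac_list) are illustrative
 * stand-ins, not the actual hclge symbols used in the patches below.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum mac_state { MAC_TO_ADD, MAC_TO_DEL, MAC_ACTIVE };

struct mac_node {
	unsigned char addr[6];
	enum mac_state state;
	struct mac_node *next;
};

/* set_rx_mode path: only record the request, no hardware access here */
static struct mac_node *request_add(struct mac_node *list,
				    const unsigned char *addr)
{
	struct mac_node *n = calloc(1, sizeof(*n));

	if (!n)
		return list;
	memcpy(n->addr, addr, sizeof(n->addr));
	n->state = MAC_TO_ADD;
	n->next = list;
	return n;
}

/* periodic task: walk the list and apply pending requests to "hardware" */
static struct mac_node *sync_mac_list(struct mac_node *list)
{
	struct mac_node **pp = &list, *n;

	while ((n = *pp) != NULL) {
		if (n->state == MAC_TO_ADD) {
			printf("hw add %02x:..:%02x\n", n->addr[0], n->addr[5]);
			n->state = MAC_ACTIVE;
			pp = &n->next;
		} else if (n->state == MAC_TO_DEL) {
			printf("hw del %02x:..:%02x\n", n->addr[0], n->addr[5]);
			*pp = n->next;
			free(n);
		} else {
			pp = &n->next;
		}
	}
	return list;
}

int main(void)
{
	const unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 0x01 };
	struct mac_node *list = request_add(NULL, mac);

	list = sync_mac_list(list);	/* service task applies the change */
	while (list) {			/* cleanup */
		struct mac_node *n = list;

		list = list->next;
		free(n);
	}
	return 0;
}

In the patches themselves, hclge_update_mac_list() plays the role of request_add(), hclge_sync_mac_table() runs from hclge_periodic_service_task(), and the list is protected by vport->mac_list_lock.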
@@ -45,6 +45,7 @@ enum HCLGE_MBX_OPCODE {
 	HCLGE_MBX_GET_MEDIA_TYPE,	/* (VF -> PF) get media type */
 	HCLGE_MBX_PUSH_PROMISC_INFO,	/* (PF -> VF) push vf promisc info */
 	HCLGE_MBX_VF_UNINIT,		/* (VF -> PF) vf is unintializing */
+	HCLGE_MBX_HANDLE_VF_TBL,	/* (VF -> PF) store/clear hw table */
 
 	HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
 	HCLGE_MBX_PUSH_LINK_STATUS,	/* (M7 -> PF) get port link status */
@@ -70,6 +71,10 @@ enum hclge_mbx_vlan_cfg_subcode {
 	HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,	/* get port based vlan state */
 };
 
+enum hclge_mbx_tbl_cfg_subcode {
+	HCLGE_MBX_VPORT_LIST_CLEAR,
+};
+
 #define HCLGE_MBX_MAX_MSG_SIZE	14
 #define HCLGE_MBX_MAX_RESP_DATA_SIZE	8U
 #define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM	4
......
@@ -233,7 +233,6 @@ struct hnae3_ae_dev {
 	struct list_head node;
 	u32 flag;
 	unsigned long hw_err_reset_req;
-	enum hnae3_reset_type reset_type;
 	void *priv;
 };
@@ -270,6 +269,8 @@ struct hnae3_ae_dev {
  *   Set loopback
  * set_promisc_mode
  *   Set promisc mode
+ * request_update_promisc_mode
+ *   request to hclge(vf) to update promisc mode
  * set_mtu()
  *   set mtu
  * get_pauseparam()
@@ -354,8 +355,6 @@ struct hnae3_ae_dev {
  *   Set vlan filter config of Ports
  * set_vf_vlan_filter()
  *   Set vlan filter config of vf
- * restore_vlan_table()
- *   Restore vlan filter entries after reset
  * enable_hw_strip_rxvtag()
  *   Enable/disable hardware strip vlan tag of packets received
  * set_gro_en
@@ -408,6 +407,7 @@ struct hnae3_ae_ops {
 	int (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
 				bool en_mc_pmc);
+	void (*request_update_promisc_mode)(struct hnae3_handle *handle);
 	int (*set_mtu)(struct hnae3_handle *handle, int new_mtu);
 
 	void (*get_pauseparam)(struct hnae3_handle *handle,
@@ -525,7 +525,6 @@ struct hnae3_ae_ops {
 			      struct ethtool_rxnfc *cmd);
 	int (*get_fd_all_rules)(struct hnae3_handle *handle,
 				struct ethtool_rxnfc *cmd, u32 *rule_locs);
-	int (*restore_fd_rules)(struct hnae3_handle *handle);
 	void (*enable_fd)(struct hnae3_handle *handle, bool enable);
 	int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
 			      u16 flow_id, struct flow_keys *fkeys);
@@ -539,7 +538,6 @@ struct hnae3_ae_ops {
 	void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
 	int (*mac_connect_phy)(struct hnae3_handle *handle);
 	void (*mac_disconnect_phy)(struct hnae3_handle *handle);
-	void (*restore_vlan_table)(struct hnae3_handle *handle);
 	int (*get_vf_config)(struct hnae3_handle *handle, int vf,
 			     struct ifla_vf_info *ivf);
 	int (*set_vf_link_state)(struct hnae3_handle *handle, int vf,
......
@@ -262,6 +262,8 @@ static void hns3_dbg_help(struct hnae3_handle *h)
 	dev_info(&h->pdev->dev, "dump mac tnl status\n");
 	dev_info(&h->pdev->dev, "dump loopback\n");
 	dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n");
+	dev_info(&h->pdev->dev, "dump uc mac list <func id>\n");
+	dev_info(&h->pdev->dev, "dump mc mac list <func id>\n");
 
 	memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
 	strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
......
@@ -40,7 +40,6 @@
 } while (0)
 
 static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
-static void hns3_remove_hw_addr(struct net_device *netdev);
 
 static const char hns3_driver_name[] = "hns3";
 static const char hns3_driver_string[] =
@@ -548,6 +547,13 @@ static int hns3_nic_uc_unsync(struct net_device *netdev,
 {
 	struct hnae3_handle *h = hns3_get_handle(netdev);
 
+	/* need ignore the request of removing device address, because
+	 * we store the device address and other addresses of uc list
+	 * in the function's mac filter list.
+	 */
+	if (ether_addr_equal(addr, netdev->dev_addr))
+		return 0;
+
 	if (h->ae_algo->ops->rm_uc_addr)
 		return h->ae_algo->ops->rm_uc_addr(h, addr);
@@ -595,34 +601,25 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
 {
 	struct hnae3_handle *h = hns3_get_handle(netdev);
 	u8 new_flags;
-	int ret;
 
 	new_flags = hns3_get_netdev_flags(netdev);
 
-	ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
-	if (ret) {
-		netdev_err(netdev, "sync uc address fail\n");
-		if (ret == -ENOSPC)
-			new_flags |= HNAE3_OVERFLOW_UPE;
-	}
-
-	if (netdev->flags & IFF_MULTICAST) {
-		ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
-				    hns3_nic_mc_unsync);
-		if (ret) {
-			netdev_err(netdev, "sync mc address fail\n");
-			if (ret == -ENOSPC)
-				new_flags |= HNAE3_OVERFLOW_MPE;
-		}
-	}
+	__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
+	__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);
 
 	/* User mode Promisc mode enable and vlan filtering is disabled to
-	 * let all packets in. MAC-VLAN Table overflow Promisc enabled and
-	 * vlan fitering is enabled
+	 * let all packets in.
 	 */
-	hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
 	h->netdev_flags = new_flags;
-	hns3_update_promisc_mode(netdev, new_flags);
+	hns3_request_update_promisc_mode(h);
+}
+
+void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+	if (ops->request_update_promisc_mode)
+		ops->request_update_promisc_mode(handle);
 }
 
 int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
@@ -2105,7 +2102,6 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	ae_dev->pdev = pdev;
 	ae_dev->flag = ent->driver_data;
-	ae_dev->reset_type = HNAE3_NONE_RESET;
 	hns3_get_dev_capability(pdev, ae_dev);
 	pci_set_drvdata(pdev, ae_dev);
@@ -3907,9 +3903,11 @@ static int hns3_init_mac_addr(struct net_device *netdev)
 		eth_hw_addr_random(netdev);
 		dev_warn(priv->dev, "using random MAC address %pM\n",
 			 netdev->dev_addr);
-	} else {
+	} else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
 		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
 		ether_addr_copy(netdev->perm_addr, mac_addr_temp);
+	} else {
+		return 0;
 	}
 
 	if (h->ae_algo->ops->set_mac_addr)
@@ -3937,17 +3935,6 @@ static void hns3_uninit_phy(struct net_device *netdev)
 		h->ae_algo->ops->mac_disconnect_phy(h);
 }
 
-static int hns3_restore_fd_rules(struct net_device *netdev)
-{
-	struct hnae3_handle *h = hns3_get_handle(netdev);
-	int ret = 0;
-
-	if (h->ae_algo->ops->restore_fd_rules)
-		ret = h->ae_algo->ops->restore_fd_rules(h);
-
-	return ret;
-}
-
 static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
 {
 	struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -4119,8 +4106,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	int ret;
 
-	hns3_remove_hw_addr(netdev);
-
 	if (netdev->reg_state != NETREG_UNINITIALIZED)
 		unregister_netdev(netdev);
@@ -4191,56 +4176,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
 	return hns3_nic_set_real_num_queue(ndev);
 }
 
-static int hns3_recover_hw_addr(struct net_device *ndev)
-{
-	struct netdev_hw_addr_list *list;
-	struct netdev_hw_addr *ha, *tmp;
-	int ret = 0;
-
-	netif_addr_lock_bh(ndev);
-	/* go through and sync uc_addr entries to the device */
-	list = &ndev->uc;
-	list_for_each_entry_safe(ha, tmp, &list->list, list) {
-		ret = hns3_nic_uc_sync(ndev, ha->addr);
-		if (ret)
-			goto out;
-	}
-
-	/* go through and sync mc_addr entries to the device */
-	list = &ndev->mc;
-	list_for_each_entry_safe(ha, tmp, &list->list, list) {
-		ret = hns3_nic_mc_sync(ndev, ha->addr);
-		if (ret)
-			goto out;
-	}
-
-out:
-	netif_addr_unlock_bh(ndev);
-	return ret;
-}
-
-static void hns3_remove_hw_addr(struct net_device *netdev)
-{
-	struct netdev_hw_addr_list *list;
-	struct netdev_hw_addr *ha, *tmp;
-
-	hns3_nic_uc_unsync(netdev, netdev->dev_addr);
-
-	netif_addr_lock_bh(netdev);
-	/* go through and unsync uc_addr entries to the device */
-	list = &netdev->uc;
-	list_for_each_entry_safe(ha, tmp, &list->list, list)
-		hns3_nic_uc_unsync(netdev, ha->addr);
-
-	/* go through and unsync mc_addr entries to the device */
-	list = &netdev->mc;
-	list_for_each_entry_safe(ha, tmp, &list->list, list)
-		if (ha->refcount > 1)
-			hns3_nic_mc_unsync(netdev, ha->addr);
-
-	netif_addr_unlock_bh(netdev);
-}
-
 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
 {
 	while (ring->next_to_clean != ring->next_to_use) {
@@ -4399,7 +4334,6 @@ static void hns3_restore_coal(struct hns3_nic_priv *priv)
 
 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
 {
-	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
 	struct net_device *ndev = kinfo->netdev;
 	struct hns3_nic_priv *priv = netdev_priv(ndev);
@@ -4407,15 +4341,6 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
 	if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
 		return 0;
 
-	/* it is cumbersome for hardware to pick-and-choose entries for deletion
-	 * from table space. Hence, for function reset software intervention is
-	 * required to delete the entries
-	 */
-	if (hns3_dev_ongoing_func_reset(ae_dev)) {
-		hns3_remove_hw_addr(ndev);
-		hns3_del_all_fd_rules(ndev, false);
-	}
-
 	if (!netif_running(ndev))
 		return 0;
@@ -4482,6 +4407,9 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
 		goto err_init_irq_fail;
 	}
 
+	if (!hns3_is_phys_func(handle->pdev))
+		hns3_init_mac_addr(netdev);
+
 	ret = hns3_client_start(handle);
 	if (ret) {
 		dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
@@ -4507,33 +4435,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
 	return ret;
 }
 
-static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
-{
-	struct net_device *netdev = handle->kinfo.netdev;
-	bool vlan_filter_enable;
-	int ret;
-
-	ret = hns3_init_mac_addr(netdev);
-	if (ret)
-		return ret;
-
-	ret = hns3_recover_hw_addr(netdev);
-	if (ret)
-		return ret;
-
-	ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
-	if (ret)
-		return ret;
-
-	vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
-	hns3_enable_vlan_filter(netdev, vlan_filter_enable);
-
-	if (handle->ae_algo->ops->restore_vlan_table)
-		handle->ae_algo->ops->restore_vlan_table(handle);
-
-	return hns3_restore_fd_rules(netdev);
-}
-
 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
 {
 	struct net_device *netdev = handle->kinfo.netdev;
@@ -4583,9 +4484,6 @@ static int hns3_reset_notify(struct hnae3_handle *handle,
 	case HNAE3_UNINIT_CLIENT:
 		ret = hns3_reset_notify_uninit_enet(handle);
 		break;
-	case HNAE3_RESTORE_CLIENT:
-		ret = hns3_reset_notify_restore_enet(handle);
-		break;
 	default:
 		break;
 	}
......
@@ -576,15 +576,6 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
 	writel(value, reg_addr + reg);
 }
 
-static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
-{
-	return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET ||
-			   ae_dev->reset_type == HNAE3_FLR_RESET ||
-			   ae_dev->reset_type == HNAE3_VF_FUNC_RESET ||
-			   ae_dev->reset_type == HNAE3_VF_FULL_RESET ||
-			   ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET));
-}
-
 #define hns3_read_dev(a, reg) \
 	hns3_read_reg((a)->io_base, (reg))
@@ -658,6 +649,7 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
 
 void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
 int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags);
+void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
 
 #ifdef CONFIG_HNS3_DCB
 void hns3_dcbnl_setup(struct hnae3_handle *handle);
......
@@ -99,7 +99,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
 		h->ae_algo->ops->set_promisc_mode(h, true, true);
 	} else {
 		/* recover promisc mode before loopback test */
-		hns3_update_promisc_mode(ndev, h->netdev_flags);
+		hns3_request_update_promisc_mode(h);
 		vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true;
 		hns3_enable_vlan_filter(ndev, vlan_filter_enable);
 	}
......
@@ -1441,6 +1441,49 @@ static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
 	hclge_dbg_dump_qs_shaper_single(hdev, qsid);
 }
 
+static int hclge_dbg_dump_mac_list(struct hclge_dev *hdev, const char *cmd_buf,
+				   bool is_unicast)
+{
+	struct hclge_mac_node *mac_node, *tmp;
+	struct hclge_vport *vport;
+	struct list_head *list;
+	u32 func_id;
+	int ret;
+
+	ret = kstrtouint(cmd_buf, 0, &func_id);
+	if (ret < 0) {
+		dev_err(&hdev->pdev->dev,
+			"dump mac list: bad command string, ret = %d\n", ret);
+		return -EINVAL;
+	}
+
+	if (func_id >= hdev->num_alloc_vport) {
+		dev_err(&hdev->pdev->dev,
+			"function id(%u) is out of range(0-%u)\n", func_id,
+			hdev->num_alloc_vport - 1);
+		return -EINVAL;
+	}
+
+	vport = &hdev->vport[func_id];
+
+	list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
+
+	dev_info(&hdev->pdev->dev, "vport %u %s mac list:\n",
+		 func_id, is_unicast ? "uc" : "mc");
+	dev_info(&hdev->pdev->dev, "mac address state\n");
+
+	spin_lock_bh(&vport->mac_list_lock);
+
+	list_for_each_entry_safe(mac_node, tmp, list, node) {
+		dev_info(&hdev->pdev->dev, "%pM %d\n",
+			 mac_node->mac_addr, mac_node->state);
+	}
+
+	spin_unlock_bh(&vport->mac_list_lock);
+
+	return 0;
+}
+
 int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
 {
 #define DUMP_REG	"dump reg"
@@ -1485,6 +1528,14 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
 	} else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
 		hclge_dbg_dump_qs_shaper(hdev,
 					 &cmd_buf[sizeof("dump qs shaper")]);
+	} else if (strncmp(cmd_buf, "dump uc mac list", 16) == 0) {
+		hclge_dbg_dump_mac_list(hdev,
+					&cmd_buf[sizeof("dump uc mac list")],
+					true);
+	} else if (strncmp(cmd_buf, "dump mc mac list", 16) == 0) {
+		hclge_dbg_dump_mac_list(hdev,
+					&cmd_buf[sizeof("dump mc mac list")],
+					false);
 	} else {
 		dev_info(&hdev->pdev->dev, "unknown command\n");
 		return -EINVAL;
......
@@ -62,14 +62,16 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev);
 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
-static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
-			       u16 *allocated_size, bool is_alloc);
 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
 						   unsigned long *addr);
 static int hclge_set_default_loopback(struct hclge_dev *hdev);
+static void hclge_sync_mac_table(struct hclge_dev *hdev);
+static void hclge_restore_hw_table(struct hclge_dev *hdev);
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
 
 static struct hnae3_ae_algo ae_algo;
 
 static struct workqueue_struct *hclge_wq;
@@ -1687,6 +1689,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
 		INIT_LIST_HEAD(&vport->vlan_list);
 		INIT_LIST_HEAD(&vport->uc_mac_list);
 		INIT_LIST_HEAD(&vport->mc_mac_list);
+		spin_lock_init(&vport->mac_list_lock);
 
 		if (i == 0)
 			ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -3729,22 +3732,13 @@ static int hclge_reset_stack(struct hclge_dev *hdev)
 	if (ret)
 		return ret;
 
-	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
-	if (ret)
-		return ret;
-
-	return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
 }
 
 static int hclge_reset_prepare(struct hclge_dev *hdev)
 {
-	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
 	int ret;
 
-	/* Initialize ae_dev reset status as well, in case enet layer wants to
-	 * know if device is undergoing reset
-	 */
-	ae_dev->reset_type = hdev->reset_type;
 	hdev->rst_stats.reset_cnt++;
 	/* perform reset of the stack & ae device for a client */
 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
@@ -3806,7 +3800,6 @@ static int hclge_reset_rebuild(struct hclge_dev *hdev)
 	hdev->last_reset_time = jiffies;
 	hdev->rst_stats.reset_fail_cnt = 0;
 	hdev->rst_stats.reset_done_cnt++;
-	ae_dev->reset_type = HNAE3_NONE_RESET;
 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
 
 	/* if default_reset_request has a higher level reset request,
@@ -3973,6 +3966,8 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev)
 	 * updated when it is triggered by mbx.
 	 */
 	hclge_update_link_status(hdev);
+	hclge_sync_mac_table(hdev);
+	hclge_sync_promisc_mode(hdev);
 
 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
 		delta = jiffies - hdev->last_serv_processed;
@@ -4722,7 +4717,8 @@ static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret)
 		dev_err(&hdev->pdev->dev,
-			"Set promisc mode fail, status is %d.\n", ret);
+			"failed to set vport %d promisc mode, ret = %d.\n",
+			param->vf_id, ret);
 
 	return ret;
 }
@@ -4772,6 +4768,14 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
 				  en_bc_pmc);
 }
 
+static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+}
+
 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
 {
 	struct hclge_get_fd_mode_cmd *req;
@@ -6924,8 +6928,22 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
 
 int hclge_vport_start(struct hclge_vport *vport)
 {
+	struct hclge_dev *hdev = vport->back;
+
 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
 	vport->last_active_jiffies = jiffies;
 
+	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
+		if (vport->vport_id) {
+			hclge_restore_mac_table_common(vport);
+			hclge_restore_vport_vlan_table(vport);
+		} else {
+			hclge_restore_hw_table(hdev);
+		}
+	}
+
+	clear_bit(vport->vport_id, hdev->vport_config_block);
+
 	return 0;
 }
@@ -6962,17 +6980,11 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
 	}
 
 	if (op == HCLGE_MAC_VLAN_ADD) {
-		if ((!resp_code) || (resp_code == 1)) {
+		if (!resp_code || resp_code == 1)
 			return 0;
-		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
-			dev_err(&hdev->pdev->dev,
-				"add mac addr failed for uc_overflow.\n");
-			return -ENOSPC;
-		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
-			dev_err(&hdev->pdev->dev,
-				"add mac addr failed for mc_overflow.\n");
+		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
+			 resp_code == HCLGE_ADD_MC_OVERFLOW)
 			return -ENOSPC;
-		}
 
 		dev_err(&hdev->pdev->dev,
 			"add mac addr failed for undefined, code=%u.\n",
@@ -7196,52 +7208,8 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
 	return cfg_status;
 }
 
-static int hclge_init_umv_space(struct hclge_dev *hdev)
-{
-	u16 allocated_size = 0;
-	int ret;
-
-	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
-				  true);
-	if (ret)
-		return ret;
-
-	if (allocated_size < hdev->wanted_umv_size)
-		dev_warn(&hdev->pdev->dev,
-			 "Alloc umv space failed, want %u, get %u\n",
-			 hdev->wanted_umv_size, allocated_size);
-
-	mutex_init(&hdev->umv_mutex);
-	hdev->max_umv_size = allocated_size;
-
-	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
-	 * preserve some unicast mac vlan table entries shared by pf
-	 * and its vfs.
-	 */
-	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
-	hdev->share_umv_size = hdev->priv_umv_size +
-			hdev->max_umv_size % (hdev->num_req_vfs + 2);
-
-	return 0;
-}
-
-static int hclge_uninit_umv_space(struct hclge_dev *hdev)
-{
-	int ret;
-
-	if (hdev->max_umv_size > 0) {
-		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
-					  false);
-		if (ret)
-			return ret;
-		hdev->max_umv_size = 0;
-	}
-	mutex_destroy(&hdev->umv_mutex);
-
-	return 0;
-}
-
 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
-			       u16 *allocated_size, bool is_alloc)
+			       u16 *allocated_size)
 {
 	struct hclge_umv_spc_alc_cmd *req;
 	struct hclge_desc desc;
@@ -7249,25 +7217,44 @@ static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
-	if (!is_alloc)
-		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
-
 	req->space_size = cpu_to_le32(space_size);
 
 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"%s umv space failed for cmd_send, ret =%d\n",
-			is_alloc ? "allocate" : "free", ret);
+		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
+			ret);
 		return ret;
 	}
 
-	if (is_alloc && allocated_size)
+	if (allocated_size)
 		*allocated_size = le32_to_cpu(desc.data[1]);
 
 	return 0;
 }
 
+static int hclge_init_umv_space(struct hclge_dev *hdev)
+{
+	u16 allocated_size = 0;
+	int ret;
+
+	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
+	if (ret)
+		return ret;
+
+	if (allocated_size < hdev->wanted_umv_size)
+		dev_warn(&hdev->pdev->dev,
+			 "failed to alloc umv space, want %u, get %u\n",
+			 hdev->wanted_umv_size, allocated_size);
+
+	hdev->max_umv_size = allocated_size;
+	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
+	hdev->share_umv_size = hdev->priv_umv_size +
+			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+
+	return 0;
+}
+
 static void hclge_reset_umv_space(struct hclge_dev *hdev)
 {
 	struct hclge_vport *vport;
@@ -7278,21 +7265,25 @@ static void hclge_reset_umv_space(struct hclge_dev *hdev)
 		vport->used_umv_num = 0;
 	}
 
-	mutex_lock(&hdev->umv_mutex);
+	mutex_lock(&hdev->vport_lock);
 	hdev->share_umv_size = hdev->priv_umv_size +
-			hdev->max_umv_size % (hdev->num_req_vfs + 2);
-	mutex_unlock(&hdev->umv_mutex);
+			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+	mutex_unlock(&hdev->vport_lock);
 }
 
-static bool hclge_is_umv_space_full(struct hclge_vport *vport)
+static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
 {
 	struct hclge_dev *hdev = vport->back;
 	bool is_full;
 
-	mutex_lock(&hdev->umv_mutex);
+	if (need_lock)
+		mutex_lock(&hdev->vport_lock);
+
 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
 		   hdev->share_umv_size == 0);
-	mutex_unlock(&hdev->umv_mutex);
+
+	if (need_lock)
+		mutex_unlock(&hdev->vport_lock);
 
 	return is_full;
 }
@@ -7301,7 +7292,6 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
 {
 	struct hclge_dev *hdev = vport->back;
 
-	mutex_lock(&hdev->umv_mutex);
 	if (is_free) {
 		if (vport->used_umv_num > hdev->priv_umv_size)
 			hdev->share_umv_size++;
@@ -7314,7 +7304,99 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
 		hdev->share_umv_size--;
 		vport->used_umv_num++;
 	}
-	mutex_unlock(&hdev->umv_mutex);
 }
 
+static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
+						  const u8 *mac_addr)
+{
+	struct hclge_mac_node *mac_node, *tmp;
+
+	list_for_each_entry_safe(mac_node, tmp, list, node)
+		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
+			return mac_node;
+
+	return NULL;
+}
+
+static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
+				  enum HCLGE_MAC_NODE_STATE state)
+{
+	switch (state) {
+	/* from set_rx_mode or tmp_add_list */
+	case HCLGE_MAC_TO_ADD:
+		if (mac_node->state == HCLGE_MAC_TO_DEL)
+			mac_node->state = HCLGE_MAC_ACTIVE;
+		break;
+	/* only from set_rx_mode */
+	case HCLGE_MAC_TO_DEL:
+		if (mac_node->state == HCLGE_MAC_TO_ADD) {
+			list_del(&mac_node->node);
+			kfree(mac_node);
+		} else {
+			mac_node->state = HCLGE_MAC_TO_DEL;
+		}
+		break;
+	/* only from tmp_add_list, the mac_node->state won't be
+	 * ACTIVE.
+	 */
+	case HCLGE_MAC_ACTIVE:
+		if (mac_node->state == HCLGE_MAC_TO_ADD)
+			mac_node->state = HCLGE_MAC_ACTIVE;
+		break;
+	}
+}
+
+int hclge_update_mac_list(struct hclge_vport *vport,
+			  enum HCLGE_MAC_NODE_STATE state,
+			  enum HCLGE_MAC_ADDR_TYPE mac_type,
+			  const unsigned char *addr)
+{
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_mac_node *mac_node;
+	struct list_head *list;
+
+	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+	       &vport->uc_mac_list : &vport->mc_mac_list;
+
+	spin_lock_bh(&vport->mac_list_lock);
+
+	/* if the mac addr is already in the mac list, no need to add a new
+	 * one into it, just check the mac addr state, convert it to a new
+	 * new state, or just remove it, or do nothing.
+	 */
+	mac_node = hclge_find_mac_node(list, addr);
+	if (mac_node) {
+		hclge_update_mac_node(mac_node, state);
+		spin_unlock_bh(&vport->mac_list_lock);
+		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+		return 0;
+	}
+
+	/* if this address is never added, unnecessary to delete */
+	if (state == HCLGE_MAC_TO_DEL) {
+		spin_unlock_bh(&vport->mac_list_lock);
+		dev_err(&hdev->pdev->dev,
+			"failed to delete address %pM from mac list\n",
+			addr);
+		return -ENOENT;
+	}
+
+	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
+	if (!mac_node) {
+		spin_unlock_bh(&vport->mac_list_lock);
+		return -ENOMEM;
+	}
+
+	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+	mac_node->state = state;
+	ether_addr_copy(mac_node->mac_addr, addr);
+	list_add_tail(&mac_node->node, list);
+
+	spin_unlock_bh(&vport->mac_list_lock);
+
+	return 0;
+}
 
 static int hclge_add_uc_addr(struct hnae3_handle *handle,
@@ -7322,7 +7404,8 @@ static int hclge_add_uc_addr(struct hnae3_handle *handle,
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 
-	return hclge_add_uc_addr_common(vport, addr);
+	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
+				     addr);
 }
 
 int hclge_add_uc_addr_common(struct hclge_vport *vport,
@@ -7361,13 +7444,17 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
 	 */
 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
 	if (ret == -ENOENT) {
-		if (!hclge_is_umv_space_full(vport)) {
+		mutex_lock(&hdev->vport_lock);
+		if (!hclge_is_umv_space_full(vport, false)) {
 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
 			if (!ret)
 				hclge_update_umv_space(vport, false);
+			mutex_unlock(&hdev->vport_lock);
 			return ret;
 		}
+		mutex_unlock(&hdev->vport_lock);
 
+		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
 				hdev->priv_umv_size);
@@ -7393,7 +7480,8 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle,
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 
-	return hclge_rm_uc_addr_common(vport, addr);
+	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
+				     addr);
 }
 
 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
@@ -7416,8 +7504,13 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
 	hclge_prepare_mac_addr(&req, addr, false);
 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
-	if (!ret)
+	if (!ret) {
+		mutex_lock(&hdev->vport_lock);
 		hclge_update_umv_space(vport, true);
+		mutex_unlock(&hdev->vport_lock);
+	} else if (ret == -ENOENT) {
+		ret = 0;
+	}
 
 	return ret;
 }
@@ -7427,7 +7520,8 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle,
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 
-	return hclge_add_mc_addr_common(vport, addr);
+	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
+				     addr);
 }
 
 int hclge_add_mc_addr_common(struct hclge_vport *vport,
@@ -7459,7 +7553,9 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
 		return status;
 
 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
-	if (status == -ENOSPC)
+	/* if already overflow, not to print each time */
+	if (status == -ENOSPC &&
+	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
 
 	return status;
@@ -7470,7 +7566,8 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle,
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 
-	return hclge_rm_mc_addr_common(vport, addr);
+	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
+				     addr);
 }
 
 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
@@ -7505,111 +7602,354 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
 		/* Not all the vfid is zero, update the vfid */
 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
-	} else {
-		/* Maybe this mac address is in mta table, but it cannot be
-		 * deleted here because an entry of mta represents an address
-		 * range rather than a specific address. the delete action to
-		 * all entries will take effect in update_mta_status called by
-		 * hns3_nic_set_rx_mode.
-		 */
+	} else if (status == -ENOENT) {
 		status = 0;
 	}
 
 	return status;
 }
 
-void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
-			       enum HCLGE_MAC_ADDR_TYPE mac_type)
-{
-	struct hclge_vport_mac_addr_cfg *mac_cfg;
-	struct list_head *list;
-
-	if (!vport->vport_id)
-		return;
-
-	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
-	if (!mac_cfg)
-		return;
-
-	mac_cfg->hd_tbl_status = true;
-	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
-
-	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
-	       &vport->uc_mac_list : &vport->mc_mac_list;
-
-	list_add_tail(&mac_cfg->node, list);
-}
-
-void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
-			      bool is_write_tbl,
-			      enum HCLGE_MAC_ADDR_TYPE mac_type)
-{
-	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
-	struct list_head *list;
-	bool uc_flag, mc_flag;
-
-	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
-	       &vport->uc_mac_list : &vport->mc_mac_list;
-
-	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
-	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
-
-	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
-		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
-			if (uc_flag && mac_cfg->hd_tbl_status)
-				hclge_rm_uc_addr_common(vport, mac_addr);
-
-			if (mc_flag && mac_cfg->hd_tbl_status)
-				hclge_rm_mc_addr_common(vport, mac_addr);
-
-			list_del(&mac_cfg->node);
-			kfree(mac_cfg);
-			break;
-		}
-	}
-}
-
+static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
+				      struct list_head *list,
+				      int (*sync)(struct hclge_vport *,
+						  const unsigned char *))
+{
+	struct hclge_mac_node *mac_node, *tmp;
+	int ret;
+
+	list_for_each_entry_safe(mac_node, tmp, list, node) {
+		ret = sync(vport, mac_node->mac_addr);
+		if (!ret) {
+			mac_node->state = HCLGE_MAC_ACTIVE;
+		} else {
+			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+				&vport->state);
+			break;
+		}
+	}
+}
+
+static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
+					struct list_head *list,
+					int (*unsync)(struct hclge_vport *,
+						      const unsigned char *))
+{
+	struct hclge_mac_node *mac_node, *tmp;
+	int ret;
+
+	list_for_each_entry_safe(mac_node, tmp, list, node) {
+		ret = unsync(vport, mac_node->mac_addr);
+		if (!ret || ret == -ENOENT) {
+			list_del(&mac_node->node);
+			kfree(mac_node);
+		} else {
+			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+				&vport->state);
+			break;
+		}
+	}
+}
+
+static bool hclge_sync_from_add_list(struct list_head *add_list,
+				     struct list_head *mac_list)
+{
+	struct hclge_mac_node *mac_node, *tmp, *new_node;
+	bool all_added = true;
+
+	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
+		if (mac_node->state == HCLGE_MAC_TO_ADD)
+			all_added = false;
+
+		/* if the mac address from tmp_add_list is not in the
+		 * uc/mc_mac_list, it means have received a TO_DEL request
+		 * during the time window of adding the mac address into mac
+		 * table. if mac_node state is ACTIVE, then change it to TO_DEL,
+		 * then it will be removed at next time. else it must be TO_ADD,
+		 * this address hasn't been added into mac table,
+		 * so just remove the mac node.
+		 */
+		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+		if (new_node) {
+			hclge_update_mac_node(new_node, mac_node->state);
+			list_del(&mac_node->node);
+			kfree(mac_node);
+		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
+			mac_node->state = HCLGE_MAC_TO_DEL;
+			list_del(&mac_node->node);
+			list_add_tail(&mac_node->node, mac_list);
+		} else {
+			list_del(&mac_node->node);
+			kfree(mac_node);
+		}
+	}
+
+	return all_added;
+}
+
+static void hclge_sync_from_del_list(struct list_head *del_list,
+				     struct list_head *mac_list)
+{
+	struct hclge_mac_node *mac_node, *tmp, *new_node;
+
+	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+		if (new_node) {
+			/* If the mac addr exists in the mac list, it means
+			 * received a new TO_ADD request during the time window
+			 * of configuring the mac address. For the mac node
+			 * state is TO_ADD, and the address is already in the
+			 * in the hardware(due to delete fail), so we just need
+			 * to change the mac node state to ACTIVE.
+			 */
+			new_node->state = HCLGE_MAC_ACTIVE;
+			list_del(&mac_node->node);
+			kfree(mac_node);
+		} else {
+			list_del(&mac_node->node);
+			list_add_tail(&mac_node->node, mac_list);
+		}
+	}
+}
+
+static void hclge_update_overflow_flags(struct hclge_vport *vport,
+					enum HCLGE_MAC_ADDR_TYPE mac_type,
+					bool is_all_added)
+{
+	if (mac_type == HCLGE_MAC_ADDR_UC) {
+		if (is_all_added)
+			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
+		else
+			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
+	} else {
+		if (is_all_added)
+			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
+		else
+			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
+	}
+}
+
+static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
+				       enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+	struct hclge_mac_node *mac_node, *tmp, *new_node;
+	struct list_head tmp_add_list, tmp_del_list;
+	struct list_head *list;
+	bool all_added;
+
+	INIT_LIST_HEAD(&tmp_add_list);
+	INIT_LIST_HEAD(&tmp_del_list);
+
+	/* move the mac addr to the tmp_add_list and tmp_del_list, then
+	 * we can add/delete these mac addr outside the spin lock
+	 */
+	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+		&vport->uc_mac_list : &vport->mc_mac_list;
+
+	spin_lock_bh(&vport->mac_list_lock);
+
+	list_for_each_entry_safe(mac_node, tmp, list, node) {
+		switch (mac_node->state) {
+		case HCLGE_MAC_TO_DEL:
+			list_del(&mac_node->node);
+			list_add_tail(&mac_node->node, &tmp_del_list);
+			break;
+		case HCLGE_MAC_TO_ADD:
+			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+			if (!new_node)
+				goto stop_traverse;
+			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
+			new_node->state = mac_node->state;
+			list_add_tail(&new_node->node, &tmp_add_list);
+			break;
+		default:
+			break;
+		}
+	}
+
+stop_traverse:
+	spin_unlock_bh(&vport->mac_list_lock);
+
+	/* delete first, in order to get max mac table space for adding */
+	if (mac_type == HCLGE_MAC_ADDR_UC) {
+		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+					    hclge_rm_uc_addr_common);
+		hclge_sync_vport_mac_list(vport, &tmp_add_list,
+					  hclge_add_uc_addr_common);
+	} else {
+		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+					    hclge_rm_mc_addr_common);
+		hclge_sync_vport_mac_list(vport, &tmp_add_list,
+					  hclge_add_mc_addr_common);
+	}
+
+	/* if some mac addresses were added/deleted fail, move back to the
+	 * mac_list, and retry at next time.
+	 */
+	spin_lock_bh(&vport->mac_list_lock);
+
+	hclge_sync_from_del_list(&tmp_del_list, list);
+	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
+
+	spin_unlock_bh(&vport->mac_list_lock);
+
+	hclge_update_overflow_flags(vport, mac_type, all_added);
+}
+
+static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
+{
+	struct hclge_dev *hdev = vport->back;
+
+	if (test_bit(vport->vport_id, hdev->vport_config_block))
+		return false;
+
+	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
+		return true;
+
+	return false;
+}
+
+static void hclge_sync_mac_table(struct hclge_dev *hdev)
+{
+	int i;
+
+	for (i = 0; i < hdev->num_alloc_vport; i++) {
+		struct hclge_vport *vport = &hdev->vport[i];
+
+		if (!hclge_need_sync_mac_table(vport))
+			continue;
+
+		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
+		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
+	}
+}
+
 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
 {
-	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
-	struct list_head *list;
+	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+	struct hclge_mac_node *mac_cfg, *tmp;
+	struct hclge_dev *hdev = vport->back;
+	struct list_head tmp_del_list, *list;
+	int ret;
 
-	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
-	       &vport->uc_mac_list : &vport->mc_mac_list;
+	if (mac_type == HCLGE_MAC_ADDR_UC) {
+		list = &vport->uc_mac_list;
+		unsync = hclge_rm_uc_addr_common;
+	} else {
+		list = &vport->mc_mac_list;
+		unsync = hclge_rm_mc_addr_common;
+	}
 
-	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
-		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
-			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
+	INIT_LIST_HEAD(&tmp_del_list);
 
-		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
-			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
+	if (!is_del_list)
+		set_bit(vport->vport_id, hdev->vport_config_block);
 
-		mac_cfg->hd_tbl_status = false;
-		if (is_del_list) {
-			list_del(&mac_cfg->node);
-			kfree(mac_cfg);
-		}
+	spin_lock_bh(&vport->mac_list_lock);
+
+	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
+		switch (mac_cfg->state) {
+		case HCLGE_MAC_TO_DEL:
+		case HCLGE_MAC_ACTIVE:
+			list_del(&mac_cfg->node);
+			list_add_tail(&mac_cfg->node, &tmp_del_list);
+			break;
+		case HCLGE_MAC_TO_ADD:
+			if (is_del_list) {
+				list_del(&mac_cfg->node);
+				kfree(mac_cfg);
+			}
+			break;
+		}
+	}
+
+	spin_unlock_bh(&vport->mac_list_lock);
+
+	list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
+		ret = unsync(vport, mac_cfg->mac_addr);
+		if (!ret || ret == -ENOENT) {
+			/* clear all mac addr from hardware, but remain these
+			 * mac addr in the mac list, and restore them after
+			 * vf reset finished.
+			 */
+			if (!is_del_list &&
+			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
+				mac_cfg->state = HCLGE_MAC_TO_ADD;
+			} else {
+				list_del(&mac_cfg->node);
+				kfree(mac_cfg);
+			}
+		} else if (is_del_list) {
+			mac_cfg->state = HCLGE_MAC_TO_DEL;
+		}
 	}
+
+	spin_lock_bh(&vport->mac_list_lock);
+
+	hclge_sync_from_del_list(&tmp_del_list, list);
+
+	spin_unlock_bh(&vport->mac_list_lock);
 }
 
-void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
+/* remove all mac address when uninitailize */
+static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
+					enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+	struct hclge_mac_node *mac_node, *tmp;
+	struct hclge_dev *hdev = vport->back;
+	struct list_head tmp_del_list, *list;
+
+	INIT_LIST_HEAD(&tmp_del_list);
+
+	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+		&vport->uc_mac_list : &vport->mc_mac_list;
+
+	spin_lock_bh(&vport->mac_list_lock);
+
+	list_for_each_entry_safe(mac_node, tmp, list, node) {
+		switch (mac_node->state) {
+		case HCLGE_MAC_TO_DEL:
+		case HCLGE_MAC_ACTIVE:
+			list_del(&mac_node->node);
+			list_add_tail(&mac_node->node, &tmp_del_list);
+			break;
+		case HCLGE_MAC_TO_ADD:
+			list_del(&mac_node->node);
+			kfree(mac_node);
+			break;
+		}
+	}
+
+	spin_unlock_bh(&vport->mac_list_lock);
+
+	if (mac_type == HCLGE_MAC_ADDR_UC)
+		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+					    hclge_rm_uc_addr_common);
+	else
+		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+					    hclge_rm_mc_addr_common);
+
+	if (!list_empty(&tmp_del_list))
+		dev_warn(&hdev->pdev->dev,
+			 "uninit %s mac list for vport %u not completely.\n",
+			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
+			 vport->vport_id);
+
+	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
+		list_del(&mac_node->node);
+		kfree(mac_node);
+	}
+}
+
+static void hclge_uninit_mac_table(struct hclge_dev *hdev)
 {
-	struct hclge_vport_mac_addr_cfg *mac, *tmp;
 	struct hclge_vport *vport;
 	int i;
 
 	for (i = 0; i < hdev->num_alloc_vport; i++) {
 		vport = &hdev->vport[i];
-		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
-			list_del(&mac->node);
-			kfree(mac);
-		}
-
-		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
-			list_del(&mac->node);
-			kfree(mac);
-		}
+		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
+		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
 	}
 }
@@ -7773,12 +8113,57 @@ static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
 }
 
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
+				       const u8 *old_addr, const u8 *new_addr)
+{
+	struct list_head *list = &vport->uc_mac_list;
+	struct hclge_mac_node *old_node, *new_node;
+
+	new_node = hclge_find_mac_node(list, new_addr);
+	if (!new_node) {
+		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+		if (!new_node)
+			return -ENOMEM;
+
+		new_node->state = HCLGE_MAC_TO_ADD;
+		ether_addr_copy(new_node->mac_addr, new_addr);
+		list_add(&new_node->node, list);
+	} else {
+		if (new_node->state == HCLGE_MAC_TO_DEL)
+			new_node->state = HCLGE_MAC_ACTIVE;
+
+		/* make sure the new addr is in the list head, avoid dev
+		 * addr may be not re-added into mac table for the umv space
+		 * limitation after global/imp reset which will clear mac
+		 * table by hardware.
+		 */
+		list_move(&new_node->node, list);
+	}
+
+	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
+		old_node = hclge_find_mac_node(list, old_addr);
+		if (old_node) {
+			if (old_node->state == HCLGE_MAC_TO_ADD) {
+				list_del(&old_node->node);
+				kfree(old_node);
+			} else {
+				old_node->state = HCLGE_MAC_TO_DEL;
+			}
+		}
+	}
+
+	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+	return 0;
+}
+
 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
 			      bool is_first)
 {
 	const unsigned char *new_addr = (const unsigned char *)p;
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
+	unsigned char *old_addr = NULL;
 	int ret;
 
 	/* mac addr check */
@@ -7786,39 +8171,42 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
 	    is_broadcast_ether_addr(new_addr) ||
 	    is_multicast_ether_addr(new_addr)) {
 		dev_err(&hdev->pdev->dev,
-			"Change uc mac err! invalid mac:%pM.\n",
+			"change uc mac err! invalid mac: %pM.\n",
 			 new_addr);
 		return -EINVAL;
 	}
 
-	if ((!is_first || is_kdump_kernel()) &&
-	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
-		dev_warn(&hdev->pdev->dev,
-			 "remove old uc mac address fail.\n");
-
-	ret = hclge_add_uc_addr(handle, new_addr);
+	ret = hclge_pause_addr_cfg(hdev, new_addr);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
-			"add uc mac address fail, ret =%d.\n",
+			"failed to configure mac pause address, ret = %d\n",
 			ret);
+		return ret;
-
-		if (!is_first &&
-		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
-			dev_err(&hdev->pdev->dev,
-				"restore uc mac address fail.\n");
-
-		return -EIO;
 	}
 
-	ret = hclge_pause_addr_cfg(hdev, new_addr);
+	if (!is_first)
+		old_addr = hdev->hw.mac.mac_addr;
+
+	spin_lock_bh(&vport->mac_list_lock);
+	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
-			"configure mac pause address fail, ret =%d.\n",
-			ret);
-		return -EIO;
+			"failed to change the mac addr:%pM, ret = %d\n",
+			new_addr, ret);
+		spin_unlock_bh(&vport->mac_list_lock);
+
+		if (!is_first)
+			hclge_pause_addr_cfg(hdev, old_addr);
+
+		return ret;
 	}
-
+	/* we must update dev addr with spin lock protect, preventing dev addr
+	 * being removed by set_rx_mode path.
+	 */
 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+	spin_unlock_bh(&vport->mac_list_lock);
+
+	hclge_task_schedule(hdev, 0);
 
 	return 0;
 }
...@@ -8398,42 +8786,80 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) ...@@ -8398,42 +8786,80 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
} }
} }
static void hclge_restore_vlan_table(struct hnae3_handle *handle) void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
{ {
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_vport_vlan_cfg *vlan, *tmp; struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
u16 vlan_proto; u16 vlan_proto;
u16 state, vlan_id; u16 vlan_id;
int i; u16 state;
int ret;
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto; vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag; vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
state = vport->port_base_vlan_cfg.state; state = vport->port_base_vlan_cfg.state;
if (state != HNAE3_PORT_BASE_VLAN_DISABLE) { if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
vport->vport_id, vlan_id, vport->vport_id, vlan_id,
false); false);
continue; return;
} }
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
int ret;
if (!vlan->hd_tbl_status)
continue;
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id, vport->vport_id,
vlan->vlan_id, false); vlan->vlan_id, false);
if (ret) if (ret)
break; break;
vlan->hd_tbl_status = true;
}
}
/* For global reset and imp reset, hardware will clear the mac table,
 * so we change the mac address state from ACTIVE to TO_ADD, so that they
 * can be restored by the service task after the reset completes. Further,
 * the mac addresses with state TO_DEL or DEL_FAIL do not need to be
 * restored after reset, so just remove these mac nodes from the mac_list.
*/
static void hclge_mac_node_convert_for_reset(struct list_head *list)
{
struct hclge_mac_node *mac_node, *tmp;
list_for_each_entry_safe(mac_node, tmp, list, node) {
if (mac_node->state == HCLGE_MAC_ACTIVE) {
mac_node->state = HCLGE_MAC_TO_ADD;
} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
list_del(&mac_node->node);
kfree(mac_node);
} }
} }
} }
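The conversion above is the core of how MAC entries survive a global or IMP reset: active entries are rewound to TO_ADD so the service task re-programs them, entries that were only queued for deletion are dropped, and hclge_restore_mac_table_common then marks the vport so the service task knows the table changed. As a minimal, self-contained user-space sketch of that rewind (simplified singly linked list and hypothetical names, not the driver code itself):

#include <stdio.h>
#include <stdlib.h>

enum mac_state { MAC_TO_ADD, MAC_TO_DEL, MAC_ACTIVE };

struct mac_node {
	enum mac_state state;
	struct mac_node *next;
};

/* Rewind the list after a reset that wiped the hardware MAC table:
 * ACTIVE entries become TO_ADD so they get re-programmed, entries that
 * were only pending deletion are freed, TO_ADD entries stay as they are.
 */
static void convert_for_reset(struct mac_node **head)
{
	struct mac_node **pp = head;

	while (*pp) {
		struct mac_node *node = *pp;

		if (node->state == MAC_ACTIVE) {
			node->state = MAC_TO_ADD;
			pp = &node->next;
		} else if (node->state == MAC_TO_DEL) {
			*pp = node->next;
			free(node);
		} else {
			pp = &node->next;
		}
	}
}

int main(void)
{
	enum mac_state init[] = { MAC_ACTIVE, MAC_TO_DEL, MAC_TO_ADD };
	struct mac_node *head = NULL;
	struct mac_node *n;
	int i;

	for (i = 0; i < 3; i++) {
		n = calloc(1, sizeof(*n));
		if (!n)
			return 1;
		n->state = init[i];
		n->next = head;
		head = n;
	}

	convert_for_reset(&head);
	for (n = head; n; n = n->next)
		printf("state %d\n", n->state);	/* only TO_ADD (0) entries remain */
	return 0;
}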
void hclge_restore_mac_table_common(struct hclge_vport *vport)
{
spin_lock_bh(&vport->mac_list_lock);
hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
spin_unlock_bh(&vport->mac_list_lock);
}
static void hclge_restore_hw_table(struct hclge_dev *hdev)
{
struct hclge_vport *vport = &hdev->vport[0];
struct hnae3_handle *handle = &vport->nic;
hclge_restore_mac_table_common(vport);
hclge_restore_vport_vlan_table(vport);
set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
hclge_restore_fd_entries(handle);
}
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{ {
struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_vport *vport = hclge_get_vport(handle);
...@@ -9748,7 +10174,7 @@ static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf, ...@@ -9748,7 +10174,7 @@ static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"vf %d vlan table is full, enable spoof check may cause its packet send fail\n", "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
vf); vf);
else if (enable && hclge_is_umv_space_full(vport)) else if (enable && hclge_is_umv_space_full(vport, true))
dev_warn(&hdev->pdev->dev, dev_warn(&hdev->pdev->dev,
"vf %d mac table is full, enable spoof check may cause its packet send fail\n", "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
vf); vf);
...@@ -9925,8 +10351,16 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -9925,8 +10351,16 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
set_bit(HCLGE_STATE_DOWN, &hdev->state); set_bit(HCLGE_STATE_DOWN, &hdev->state);
hclge_stats_clear(hdev); hclge_stats_clear(hdev);
/* NOTE: a pf reset does not need to clear or restore the pf and vf
 * table entries, so the tables in memory should not be cleaned here.
*/
if (hdev->reset_type == HNAE3_IMP_RESET ||
hdev->reset_type == HNAE3_GLOBAL_RESET) {
memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
hclge_reset_umv_space(hdev);
}
ret = hclge_cmd_init(hdev); ret = hclge_cmd_init(hdev);
if (ret) { if (ret) {
...@@ -9940,8 +10374,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -9940,8 +10374,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret; return ret;
} }
hclge_reset_umv_space(hdev);
ret = hclge_mac_init(hdev); ret = hclge_mac_init(hdev);
if (ret) { if (ret) {
dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
...@@ -10037,12 +10469,11 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -10037,12 +10469,11 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_clear_vf_vlan(hdev); hclge_clear_vf_vlan(hdev);
hclge_misc_affinity_teardown(hdev); hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev); hclge_state_uninit(hdev);
hclge_uninit_mac_table(hdev);
if (mac->phydev) if (mac->phydev)
mdiobus_unregister(mac->mdio_bus); mdiobus_unregister(mac->mdio_bus);
hclge_uninit_umv_space(hdev);
/* Disable MISC vector(vector0) */ /* Disable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, false); hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq); synchronize_irq(hdev->misc_vector.vector_irq);
...@@ -10056,7 +10487,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) ...@@ -10056,7 +10487,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_misc_irq_uninit(hdev); hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev); hclge_pci_uninit(hdev);
mutex_destroy(&hdev->vport_lock); mutex_destroy(&hdev->vport_lock);
hclge_uninit_vport_mac_table(hdev);
hclge_uninit_vport_vlan_table(hdev); hclge_uninit_vport_vlan_table(hdev);
ae_dev->priv = NULL; ae_dev->priv = NULL;
} }
...@@ -10666,6 +11096,30 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable) ...@@ -10666,6 +11096,30 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
return hclge_config_gro(hdev, enable); return hclge_config_gro(hdev, enable);
} }
static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
struct hclge_vport *vport = &hdev->vport[0];
struct hnae3_handle *handle = &vport->nic;
u8 tmp_flags = 0;
int ret;
if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
vport->last_promisc_flags = vport->overflow_promisc_flags;
}
if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
tmp_flags & HNAE3_MPE);
if (!ret) {
clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
hclge_enable_vlan_filter(handle,
tmp_flags & HNAE3_VLAN_FLTR);
}
}
}
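With this patchset the promiscuous mode is also applied asynchronously: request_update_promisc_mode and the overflow flags only mark HCLGE_STATE_PROMISC_CHANGED, and hclge_sync_promisc_mode above re-programs the hardware from the periodic service task. A minimal user-space sketch of that request/apply split (plain variables instead of atomic state bits, hypothetical names, not the driver code):

#include <stdbool.h>
#include <stdio.h>

static bool promisc_changed;		/* stands in for the PROMISC_CHANGED state bit */
static unsigned int wanted_flags;	/* stands in for netdev_flags | last_promisc_flags */

/* Fast path: callers only record that a change is wanted. */
static void request_update_promisc(unsigned int flags)
{
	wanted_flags = flags;
	promisc_changed = true;
}

/* Stub for the firmware command that really programs promiscuous mode. */
static int program_promisc(unsigned int flags)
{
	printf("programming promisc flags 0x%x\n", flags);
	return 0;
}

/* Periodic service task: apply the pending change and clear the flag only
 * on success, so a failed attempt is retried on the next run.
 */
static void sync_promisc(void)
{
	if (!promisc_changed)
		return;
	if (!program_promisc(wanted_flags))
		promisc_changed = false;
}

int main(void)
{
	request_update_promisc(0x3);	/* e.g. unicast + multicast promisc */
	sync_promisc();			/* later, from the service task */
	return 0;
}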
static const struct hnae3_ae_ops hclge_ops = { static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev, .init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev,
...@@ -10678,6 +11132,7 @@ static const struct hnae3_ae_ops hclge_ops = { ...@@ -10678,6 +11132,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_vector = hclge_get_vector, .get_vector = hclge_get_vector,
.put_vector = hclge_put_vector, .put_vector = hclge_put_vector,
.set_promisc_mode = hclge_set_promisc_mode, .set_promisc_mode = hclge_set_promisc_mode,
.request_update_promisc_mode = hclge_request_update_promisc_mode,
.set_loopback = hclge_set_loopback, .set_loopback = hclge_set_loopback,
.start = hclge_ae_start, .start = hclge_ae_start,
.stop = hclge_ae_stop, .stop = hclge_ae_stop,
...@@ -10739,7 +11194,6 @@ static const struct hnae3_ae_ops hclge_ops = { ...@@ -10739,7 +11194,6 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_rule_cnt = hclge_get_fd_rule_cnt, .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
.get_fd_rule_info = hclge_get_fd_rule_info, .get_fd_rule_info = hclge_get_fd_rule_info,
.get_fd_all_rules = hclge_get_all_rules, .get_fd_all_rules = hclge_get_all_rules,
.restore_fd_rules = hclge_restore_fd_entries,
.enable_fd = hclge_enable_fd, .enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs, .add_arfs_entry = hclge_add_fd_entry_by_arfs,
.dbg_run_cmd = hclge_dbg_run_cmd, .dbg_run_cmd = hclge_dbg_run_cmd,
...@@ -10752,7 +11206,6 @@ static const struct hnae3_ae_ops hclge_ops = { ...@@ -10752,7 +11206,6 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_timer_task = hclge_set_timer_task, .set_timer_task = hclge_set_timer_task,
.mac_connect_phy = hclge_mac_connect_phy, .mac_connect_phy = hclge_mac_connect_phy,
.mac_disconnect_phy = hclge_mac_disconnect_phy, .mac_disconnect_phy = hclge_mac_disconnect_phy,
.restore_vlan_table = hclge_restore_vlan_table,
.get_vf_config = hclge_get_vf_config, .get_vf_config = hclge_get_vf_config,
.set_vf_link_state = hclge_set_vf_link_state, .set_vf_link_state = hclge_set_vf_link_state,
.set_vf_spoofchk = hclge_set_vf_spoofchk, .set_vf_spoofchk = hclge_set_vf_spoofchk,
......
...@@ -217,6 +217,7 @@ enum HCLGE_DEV_STATE { ...@@ -217,6 +217,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_STATISTICS_UPDATING, HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_CMD_DISABLE, HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_LINK_UPDATING, HCLGE_STATE_LINK_UPDATING,
HCLGE_STATE_PROMISC_CHANGED,
HCLGE_STATE_RST_FAIL, HCLGE_STATE_RST_FAIL,
HCLGE_STATE_MAX HCLGE_STATE_MAX
}; };
...@@ -630,9 +631,15 @@ struct hclge_fd_ad_data { ...@@ -630,9 +631,15 @@ struct hclge_fd_ad_data {
u16 rule_id; u16 rule_id;
}; };
struct hclge_vport_mac_addr_cfg { enum HCLGE_MAC_NODE_STATE {
HCLGE_MAC_TO_ADD,
HCLGE_MAC_TO_DEL,
HCLGE_MAC_ACTIVE
};
struct hclge_mac_node {
struct list_head node; struct list_head node;
int hd_tbl_status; enum HCLGE_MAC_NODE_STATE state;
u8 mac_addr[ETH_ALEN]; u8 mac_addr[ETH_ALEN];
}; };
...@@ -805,6 +812,8 @@ struct hclge_dev { ...@@ -805,6 +812,8 @@ struct hclge_dev {
unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)]; unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)]; unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
unsigned long vport_config_block[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
struct hclge_fd_cfg fd_cfg; struct hclge_fd_cfg fd_cfg;
struct hlist_head fd_rule_list; struct hlist_head fd_rule_list;
spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */ spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
...@@ -822,7 +831,6 @@ struct hclge_dev { ...@@ -822,7 +831,6 @@ struct hclge_dev {
u16 priv_umv_size; u16 priv_umv_size;
/* unicast mac vlan space shared by PF and its VFs */ /* unicast mac vlan space shared by PF and its VFs */
u16 share_umv_size; u16 share_umv_size;
struct mutex umv_mutex; /* protect share_umv_size */
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE); HCLGE_MAC_TNL_LOG_SIZE);
...@@ -866,6 +874,7 @@ struct hclge_rss_tuple_cfg { ...@@ -866,6 +874,7 @@ struct hclge_rss_tuple_cfg {
enum HCLGE_VPORT_STATE { enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_ALIVE, HCLGE_VPORT_STATE_ALIVE,
HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
HCLGE_VPORT_STATE_MAX HCLGE_VPORT_STATE_MAX
}; };
...@@ -922,6 +931,10 @@ struct hclge_vport { ...@@ -922,6 +931,10 @@ struct hclge_vport {
u32 mps; /* Max packet size */ u32 mps; /* Max packet size */
struct hclge_vf_info vf_info; struct hclge_vf_info vf_info;
u8 overflow_promisc_flags;
u8 last_promisc_flags;
spinlock_t mac_list_lock; /* protect the mac addresses to be added/deleted */
struct list_head uc_mac_list; /* Store VF unicast table */ struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */ struct list_head mc_mac_list; /* Store VF multicast table */
struct list_head vlan_list; /* Store VF vlan table */ struct list_head vlan_list; /* Store VF vlan table */
...@@ -977,16 +990,18 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf); ...@@ -977,16 +990,18 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id); u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev, int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type); enum hnae3_reset_notify_type type);
void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, int hclge_update_mac_list(struct hclge_vport *vport,
enum HCLGE_MAC_ADDR_TYPE mac_type); enum HCLGE_MAC_NODE_STATE state,
void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, enum HCLGE_MAC_ADDR_TYPE mac_type,
bool is_write_tbl, const unsigned char *addr);
enum HCLGE_MAC_ADDR_TYPE mac_type); int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
const u8 *old_addr, const u8 *new_addr);
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type); enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list); void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev); void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
void hclge_restore_mac_table_common(struct hclge_vport *vport);
void hclge_restore_vport_vlan_table(struct hclge_vport *vport);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info); struct hclge_vlan_info *vlan_info);
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
......
...@@ -275,26 +275,17 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, ...@@ -275,26 +275,17 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
if (!is_valid_ether_addr(mac_addr)) if (!is_valid_ether_addr(mac_addr))
return -EINVAL; return -EINVAL;
hclge_rm_uc_addr_common(vport, old_addr); spin_lock_bh(&vport->mac_list_lock);
status = hclge_add_uc_addr_common(vport, mac_addr); status = hclge_update_mac_node_for_dev_addr(vport, old_addr,
if (status) { mac_addr);
hclge_add_uc_addr_common(vport, old_addr); spin_unlock_bh(&vport->mac_list_lock);
} else { hclge_task_schedule(hdev, 0);
hclge_rm_vport_mac_table(vport, mac_addr,
false, HCLGE_MAC_ADDR_UC);
hclge_add_vport_mac_table(vport, mac_addr,
HCLGE_MAC_ADDR_UC);
}
} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) { } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
status = hclge_add_uc_addr_common(vport, mac_addr); status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
if (!status) HCLGE_MAC_ADDR_UC, mac_addr);
hclge_add_vport_mac_table(vport, mac_addr,
HCLGE_MAC_ADDR_UC);
} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) { } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
status = hclge_rm_uc_addr_common(vport, mac_addr); status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
if (!status) HCLGE_MAC_ADDR_UC, mac_addr);
hclge_rm_vport_mac_table(vport, mac_addr,
false, HCLGE_MAC_ADDR_UC);
} else { } else {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"failed to set unicast mac addr, unknown subcode %u\n", "failed to set unicast mac addr, unknown subcode %u\n",
...@@ -310,18 +301,13 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, ...@@ -310,18 +301,13 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
{ {
const u8 *mac_addr = (const u8 *)(mbx_req->msg.data); const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
struct hclge_dev *hdev = vport->back; struct hclge_dev *hdev = vport->back;
int status;
if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) { if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
status = hclge_add_mc_addr_common(vport, mac_addr); hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
if (!status) HCLGE_MAC_ADDR_MC, mac_addr);
hclge_add_vport_mac_table(vport, mac_addr,
HCLGE_MAC_ADDR_MC);
} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
status = hclge_rm_mc_addr_common(vport, mac_addr); hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
if (!status) HCLGE_MAC_ADDR_MC, mac_addr);
hclge_rm_vport_mac_table(vport, mac_addr,
false, HCLGE_MAC_ADDR_MC);
} else { } else {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"failed to set mcast mac addr, unknown subcode %u\n", "failed to set mcast mac addr, unknown subcode %u\n",
...@@ -329,7 +315,7 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, ...@@ -329,7 +315,7 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
return -EIO; return -EIO;
} }
return status; return 0;
} }
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
...@@ -643,6 +629,23 @@ static void hclge_handle_ncsi_error(struct hclge_dev *hdev) ...@@ -643,6 +629,23 @@ static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
ae_dev->ops->reset_event(hdev->pdev, NULL); ae_dev->ops->reset_event(hdev->pdev, NULL);
} }
static void hclge_handle_vf_tbl(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
struct hclge_dev *hdev = vport->back;
struct hclge_vf_vlan_cfg *msg_cmd;
msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) {
hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC);
hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC);
hclge_rm_vport_all_vlan_table(vport, true);
} else {
dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n",
msg_cmd->subcode);
}
}
void hclge_mbx_handler(struct hclge_dev *hdev) void hclge_mbx_handler(struct hclge_dev *hdev)
{ {
struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq; struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
...@@ -650,6 +653,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ...@@ -650,6 +653,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
struct hclge_mbx_vf_to_pf_cmd *req; struct hclge_mbx_vf_to_pf_cmd *req;
struct hclge_vport *vport; struct hclge_vport *vport;
struct hclge_desc *desc; struct hclge_desc *desc;
bool is_del = false;
unsigned int flag; unsigned int flag;
int ret = 0; int ret = 0;
...@@ -767,11 +771,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ...@@ -767,11 +771,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
break; break;
case HCLGE_MBX_GET_VF_FLR_STATUS: case HCLGE_MBX_GET_VF_FLR_STATUS:
case HCLGE_MBX_VF_UNINIT: case HCLGE_MBX_VF_UNINIT:
hclge_rm_vport_all_mac_table(vport, true, is_del = req->msg.code == HCLGE_MBX_VF_UNINIT;
hclge_rm_vport_all_mac_table(vport, is_del,
HCLGE_MAC_ADDR_UC); HCLGE_MAC_ADDR_UC);
hclge_rm_vport_all_mac_table(vport, true, hclge_rm_vport_all_mac_table(vport, is_del,
HCLGE_MAC_ADDR_MC); HCLGE_MAC_ADDR_MC);
hclge_rm_vport_all_vlan_table(vport, true); hclge_rm_vport_all_vlan_table(vport, is_del);
break; break;
case HCLGE_MBX_GET_MEDIA_TYPE: case HCLGE_MBX_GET_MEDIA_TYPE:
hclge_get_vf_media_type(vport, &resp_msg); hclge_get_vf_media_type(vport, &resp_msg);
...@@ -785,6 +790,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ...@@ -785,6 +790,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_NCSI_ERROR: case HCLGE_MBX_NCSI_ERROR:
hclge_handle_ncsi_error(hdev); hclge_handle_ncsi_error(hdev);
break; break;
case HCLGE_MBX_HANDLE_VF_TBL:
hclge_handle_vf_tbl(vport, req);
break;
default: default:
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %u\n", "un-supported mailbox message, code = %u\n",
......
...@@ -1164,6 +1164,27 @@ static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, ...@@ -1164,6 +1164,27 @@ static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
en_bc_pmc); en_bc_pmc);
} }
static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
}
static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
{
struct hnae3_handle *handle = &hdev->nic;
bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
int ret;
if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
if (!ret)
clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
}
}
static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id, static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
int stream_id, bool enable) int stream_id, bool enable)
{ {
...@@ -1245,9 +1266,11 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, ...@@ -1245,9 +1266,11 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
int status; int status;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
send_msg.subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD : send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
HCLGE_MBX_MAC_VLAN_UC_MODIFY;
ether_addr_copy(send_msg.data, new_mac_addr); ether_addr_copy(send_msg.data, new_mac_addr);
if (is_first && !hdev->has_pf_mac)
eth_zero_addr(&send_msg.data[ETH_ALEN]);
else
ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
if (!status) if (!status)
...@@ -1256,54 +1279,302 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, ...@@ -1256,54 +1279,302 @@ static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
return status; return status;
} }
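The modified HCLGE_MBX_MAC_VLAN_UC_MODIFY request now carries both addresses in one payload: the new MAC in the first six bytes and the old MAC in the following six, zeroed when this is the first configuration and there is no PF-assigned MAC, so the PF handles the request as a plain add. A small user-space sketch of that packing (hypothetical helper name, not the driver code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Pack the UC_MODIFY payload: bytes 0..5 carry the new MAC and bytes
 * 6..11 carry the old MAC, or all zeros when there is no previous or
 * PF-assigned address.
 */
static void pack_modify_msg(uint8_t *data, const uint8_t *new_mac,
			    const uint8_t *old_mac)
{
	memcpy(data, new_mac, ETH_ALEN);
	if (old_mac)
		memcpy(data + ETH_ALEN, old_mac, ETH_ALEN);
	else
		memset(data + ETH_ALEN, 0, ETH_ALEN);
}

int main(void)
{
	uint8_t payload[2 * ETH_ALEN];
	uint8_t new_mac[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	int i;

	pack_modify_msg(payload, new_mac, NULL);	/* first config, no PF MAC */
	for (i = 0; i < 2 * ETH_ALEN; i++)
		printf("%02x%c", payload[i], i == 2 * ETH_ALEN - 1 ? '\n' : ' ');
	return 0;
}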
static int hclgevf_add_uc_addr(struct hnae3_handle *handle, static struct hclgevf_mac_addr_node *
hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
{
struct hclgevf_mac_addr_node *mac_node, *tmp;
list_for_each_entry_safe(mac_node, tmp, list, node)
if (ether_addr_equal(mac_addr, mac_node->mac_addr))
return mac_node;
return NULL;
}
static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
enum HCLGEVF_MAC_NODE_STATE state)
{
switch (state) {
/* from set_rx_mode or tmp_add_list */
case HCLGEVF_MAC_TO_ADD:
if (mac_node->state == HCLGEVF_MAC_TO_DEL)
mac_node->state = HCLGEVF_MAC_ACTIVE;
break;
/* only from set_rx_mode */
case HCLGEVF_MAC_TO_DEL:
if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
list_del(&mac_node->node);
kfree(mac_node);
} else {
mac_node->state = HCLGEVF_MAC_TO_DEL;
}
break;
/* only from tmp_add_list, the mac_node->state won't be
* HCLGEVF_MAC_ACTIVE
*/
case HCLGEVF_MAC_ACTIVE:
if (mac_node->state == HCLGEVF_MAC_TO_ADD)
mac_node->state = HCLGEVF_MAC_ACTIVE;
break;
}
}
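hclgevf_update_mac_node above is the per-node state machine that merges a new request into an entry already on a list: a TO_DEL node that is re-added goes back to ACTIVE (it is still programmed in hardware), a TO_ADD node that is deleted before ever reaching hardware is simply freed, and a TO_ADD node is promoted to ACTIVE once hardware programming succeeded. A compact user-space model of those transitions (simplified enums and a return value instead of list surgery, hypothetical names):

#include <stdio.h>

/* Simplified model of the VF MAC node states (mirrors HCLGEVF_MAC_NODE_STATE). */
enum mac_state { MAC_TO_ADD, MAC_TO_DEL, MAC_ACTIVE };
enum mac_action { MAC_KEEP, MAC_FREE_NODE };

/* What to do with a node that is already on a list when a new request
 * arrives: 'request' is the state asked for by set_rx_mode (TO_ADD/TO_DEL)
 * or by the merge-back path after hardware programming (ACTIVE).
 */
static enum mac_action update_node(enum mac_state *cur, enum mac_state request)
{
	switch (request) {
	case MAC_TO_ADD:	/* re-add cancels a pending delete */
		if (*cur == MAC_TO_DEL)
			*cur = MAC_ACTIVE;
		return MAC_KEEP;
	case MAC_TO_DEL:	/* deleting a never-programmed add frees the node */
		if (*cur == MAC_TO_ADD)
			return MAC_FREE_NODE;
		*cur = MAC_TO_DEL;
		return MAC_KEEP;
	case MAC_ACTIVE:	/* hardware programming succeeded */
		if (*cur == MAC_TO_ADD)
			*cur = MAC_ACTIVE;
		return MAC_KEEP;
	}
	return MAC_KEEP;
}

int main(void)
{
	enum mac_state s = MAC_TO_ADD;

	/* Deleting an address that was never programmed just frees the node
	 * instead of queueing a mailbox message.
	 */
	printf("%s\n", update_node(&s, MAC_TO_DEL) == MAC_FREE_NODE ?
	       "free node" : "keep node");
	return 0;
}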
static int hclgevf_update_mac_list(struct hnae3_handle *handle,
enum HCLGEVF_MAC_NODE_STATE state,
enum HCLGEVF_MAC_ADDR_TYPE mac_type,
const unsigned char *addr) const unsigned char *addr)
{ {
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclge_vf_to_pf_msg send_msg; struct hclgevf_mac_addr_node *mac_node;
struct list_head *list;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
HCLGE_MBX_MAC_VLAN_UC_ADD); &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
ether_addr_copy(send_msg.data, addr);
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); spin_lock_bh(&hdev->mac_table.mac_list_lock);
/* if the mac addr is already in the mac list, no need to add a new
 * one into it, just check the mac addr state, convert it to a
 * new state, or just remove it, or do nothing.
*/
mac_node = hclgevf_find_mac_node(list, addr);
if (mac_node) {
hclgevf_update_mac_node(mac_node, state);
spin_unlock_bh(&hdev->mac_table.mac_list_lock);
return 0;
}
/* if this address was never added, there is nothing to delete */
if (state == HCLGEVF_MAC_TO_DEL) {
spin_unlock_bh(&hdev->mac_table.mac_list_lock);
return -ENOENT;
}
mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
if (!mac_node) {
spin_unlock_bh(&hdev->mac_table.mac_list_lock);
return -ENOMEM;
}
mac_node->state = state;
ether_addr_copy(mac_node->mac_addr, addr);
list_add_tail(&mac_node->node, list);
spin_unlock_bh(&hdev->mac_table.mac_list_lock);
return 0;
} }
static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
const unsigned char *addr) const unsigned char *addr)
{ {
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
struct hclge_vf_to_pf_msg send_msg; HCLGEVF_MAC_ADDR_UC, addr);
}
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
HCLGE_MBX_MAC_VLAN_UC_REMOVE); const unsigned char *addr)
ether_addr_copy(send_msg.data, addr); {
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
HCLGEVF_MAC_ADDR_UC, addr);
} }
static int hclgevf_add_mc_addr(struct hnae3_handle *handle, static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
const unsigned char *addr) const unsigned char *addr)
{ {
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
struct hclge_vf_to_pf_msg send_msg; HCLGEVF_MAC_ADDR_MC, addr);
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST,
HCLGE_MBX_MAC_VLAN_MC_ADD);
ether_addr_copy(send_msg.data, addr);
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
} }
static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
const unsigned char *addr) const unsigned char *addr)
{ {
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
HCLGEVF_MAC_ADDR_MC, addr);
}
static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
struct hclgevf_mac_addr_node *mac_node,
enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
struct hclge_vf_to_pf_msg send_msg; struct hclge_vf_to_pf_msg send_msg;
u8 code, subcode;
if (mac_type == HCLGEVF_MAC_ADDR_UC) {
code = HCLGE_MBX_SET_UNICAST;
if (mac_node->state == HCLGEVF_MAC_TO_ADD)
subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
else
subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
} else {
code = HCLGE_MBX_SET_MULTICAST;
if (mac_node->state == HCLGEVF_MAC_TO_ADD)
subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
else
subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
}
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MULTICAST, hclgevf_build_send_msg(&send_msg, code, subcode);
HCLGE_MBX_MAC_VLAN_MC_REMOVE); ether_addr_copy(send_msg.data, mac_node->mac_addr);
ether_addr_copy(send_msg.data, addr);
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
} }
static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
struct list_head *list,
enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
struct hclgevf_mac_addr_node *mac_node, *tmp;
int ret;
list_for_each_entry_safe(mac_node, tmp, list, node) {
ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to configure mac %pM, state = %d, ret = %d\n",
mac_node->mac_addr, mac_node->state, ret);
return;
}
if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
mac_node->state = HCLGEVF_MAC_ACTIVE;
} else {
list_del(&mac_node->node);
kfree(mac_node);
}
}
}
static void hclgevf_sync_from_add_list(struct list_head *add_list,
struct list_head *mac_list)
{
struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
list_for_each_entry_safe(mac_node, tmp, add_list, node) {
/* if the mac address from tmp_add_list is not in the
 * uc/mc_mac_list, it means a TO_DEL request was received during
 * the time window of sending the mac config request to the PF.
 * If the mac_node state is ACTIVE, change it to TO_DEL so it will
 * be removed next time; if it is TO_ADD, the TO_ADD request
 * failed, so just remove the mac node.
*/
new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
if (new_node) {
hclgevf_update_mac_node(new_node, mac_node->state);
list_del(&mac_node->node);
kfree(mac_node);
} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
mac_node->state = HCLGEVF_MAC_TO_DEL;
list_del(&mac_node->node);
list_add_tail(&mac_node->node, mac_list);
} else {
list_del(&mac_node->node);
kfree(mac_node);
}
}
}
static void hclgevf_sync_from_del_list(struct list_head *del_list,
struct list_head *mac_list)
{
struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
list_for_each_entry_safe(mac_node, tmp, del_list, node) {
new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
if (new_node) {
/* If the mac addr exists in the mac list, it means a new
 * TO_ADD request was received during the time window of sending
 * the mac addr config request to the PF, so just change the mac
 * state to ACTIVE.
*/
new_node->state = HCLGEVF_MAC_ACTIVE;
list_del(&mac_node->node);
kfree(mac_node);
} else {
list_del(&mac_node->node);
list_add_tail(&mac_node->node, mac_list);
}
}
}
static void hclgevf_clear_list(struct list_head *list)
{
struct hclgevf_mac_addr_node *mac_node, *tmp;
list_for_each_entry_safe(mac_node, tmp, list, node) {
list_del(&mac_node->node);
kfree(mac_node);
}
}
static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
struct list_head tmp_add_list, tmp_del_list;
struct list_head *list;
INIT_LIST_HEAD(&tmp_add_list);
INIT_LIST_HEAD(&tmp_del_list);
/* move the pending mac addresses to tmp_add_list and tmp_del_list,
 * so we can add/delete them outside the spin lock
*/
list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
&hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
spin_lock_bh(&hdev->mac_table.mac_list_lock);
list_for_each_entry_safe(mac_node, tmp, list, node) {
switch (mac_node->state) {
case HCLGEVF_MAC_TO_DEL:
list_del(&mac_node->node);
list_add_tail(&mac_node->node, &tmp_del_list);
break;
case HCLGEVF_MAC_TO_ADD:
new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
if (!new_node)
goto stop_traverse;
ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
new_node->state = mac_node->state;
list_add_tail(&new_node->node, &tmp_add_list);
break;
default:
break;
}
}
stop_traverse:
spin_unlock_bh(&hdev->mac_table.mac_list_lock);
/* delete first, in order to get max mac table space for adding */
hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);
/* if adding/deleting some mac addresses failed, move them back to
 * the mac_list and retry on the next service task run.
*/
spin_lock_bh(&hdev->mac_table.mac_list_lock);
hclgevf_sync_from_del_list(&tmp_del_list, list);
hclgevf_sync_from_add_list(&tmp_add_list, list);
spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}
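hclgevf_sync_mac_list above follows a classic two-phase pattern: snapshot the pending work under mac_list_lock, issue the potentially slow mailbox requests with the lock dropped, then merge the outcome back under the lock via the sync_from_add/del_list helpers. A minimal user-space sketch of that pattern, with a pthread mutex standing in for the bottom-half spinlock and a stub standing in for the mailbox call (all names hypothetical, not the driver code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_PENDING 8

struct pending_req {
	int id;
	bool to_add;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pending_req pending_list[MAX_PENDING];
static int npending;

/* Stub for the mailbox call to the PF; in the driver this may take a
 * while, which is why it must run with the list lock dropped.
 */
static int send_to_pf(const struct pending_req *req)
{
	printf("%s entry %d\n", req->to_add ? "add" : "del", req->id);
	return 0;
}

static void sync_pending(void)
{
	struct pending_req snapshot[MAX_PENDING];
	int n, i;

	/* Phase 1: copy the pending work under the lock. */
	pthread_mutex_lock(&list_lock);
	n = npending;
	for (i = 0; i < n; i++)
		snapshot[i] = pending_list[i];
	pthread_mutex_unlock(&list_lock);

	/* Phase 2: talk to the PF with the lock dropped. */
	for (i = 0; i < n; i++)
		send_to_pf(&snapshot[i]);

	/* Phase 3: merge back under the lock; drop what was sent and keep
	 * anything queued meanwhile. The driver instead walks the
	 * tmp_add_list/tmp_del_list and re-queues entries that failed or
	 * changed state in the meantime.
	 */
	pthread_mutex_lock(&list_lock);
	for (i = n; i < npending; i++)
		pending_list[i - n] = pending_list[i];
	npending -= n;
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	pending_list[npending++] = (struct pending_req){ .id = 1, .to_add = true };
	pending_list[npending++] = (struct pending_req){ .id = 2, .to_add = false };
	sync_pending();
	return 0;
}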
static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
{
hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
}
static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
{
spin_lock_bh(&hdev->mac_table.mac_list_lock);
hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
hclgevf_clear_list(&hdev->mac_table.mc_mac_list);
spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
__be16 proto, u16 vlan_id, __be16 proto, u16 vlan_id,
bool is_kill) bool is_kill)
...@@ -1506,10 +1777,6 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev) ...@@ -1506,10 +1777,6 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
if (ret) if (ret)
return ret; return ret;
ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
if (ret)
return ret;
/* clear handshake status with IMP */ /* clear handshake status with IMP */
hclgevf_reset_handshake(hdev, false); hclgevf_reset_handshake(hdev, false);
...@@ -1589,13 +1856,8 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) ...@@ -1589,13 +1856,8 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
{ {
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret; int ret;
/* Initialize ae_dev reset status as well, in case enet layer wants to
* know if device is undergoing reset
*/
ae_dev->reset_type = hdev->reset_type;
hdev->rst_stats.rst_cnt++; hdev->rst_stats.rst_cnt++;
rtnl_lock(); rtnl_lock();
...@@ -1610,7 +1872,6 @@ static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) ...@@ -1610,7 +1872,6 @@ static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
{ {
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret; int ret;
hdev->rst_stats.hw_rst_done_cnt++; hdev->rst_stats.hw_rst_done_cnt++;
...@@ -1625,7 +1886,6 @@ static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) ...@@ -1625,7 +1886,6 @@ static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
} }
hdev->last_reset_time = jiffies; hdev->last_reset_time = jiffies;
ae_dev->reset_type = HNAE3_NONE_RESET;
hdev->rst_stats.rst_done_cnt++; hdev->rst_stats.rst_done_cnt++;
hdev->rst_stats.rst_fail_cnt = 0; hdev->rst_stats.rst_fail_cnt = 0;
clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
...@@ -1951,6 +2211,10 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) ...@@ -1951,6 +2211,10 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
hclgevf_sync_vlan_filter(hdev); hclgevf_sync_vlan_filter(hdev);
hclgevf_sync_mac_table(hdev);
hclgevf_sync_promisc_mode(hdev);
hdev->last_serv_processed = jiffies; hdev->last_serv_processed = jiffies;
out: out:
...@@ -2313,6 +2577,10 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev) ...@@ -2313,6 +2577,10 @@ static void hclgevf_state_init(struct hclgevf_dev *hdev)
mutex_init(&hdev->mbx_resp.mbx_mutex); mutex_init(&hdev->mbx_resp.mbx_mutex);
sema_init(&hdev->reset_sem, 1); sema_init(&hdev->reset_sem, 1);
spin_lock_init(&hdev->mac_table.mac_list_lock);
INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
/* bring the device down */ /* bring the device down */
set_bit(HCLGEVF_STATE_DOWN, &hdev->state); set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
} }
...@@ -2695,6 +2963,15 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev) ...@@ -2695,6 +2963,15 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
return ret; return ret;
} }
static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
{
struct hclge_vf_to_pf_msg send_msg;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
HCLGE_MBX_VPORT_LIST_CLEAR);
return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
}
static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{ {
struct pci_dev *pdev = hdev->pdev; struct pci_dev *pdev = hdev->pdev;
...@@ -2730,6 +3007,8 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) ...@@ -2730,6 +3007,8 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret; return ret;
} }
set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
dev_info(&hdev->pdev->dev, "Reset done\n"); dev_info(&hdev->pdev->dev, "Reset done\n");
return 0; return 0;
...@@ -2802,6 +3081,15 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) ...@@ -2802,6 +3081,15 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config; goto err_config;
} }
/* ensure the vf tbl list is empty before init */
ret = hclgevf_clear_vport_list(hdev);
if (ret) {
dev_err(&pdev->dev,
"failed to clear tbl list configuration, ret = %d.\n",
ret);
goto err_config;
}
ret = hclgevf_init_vlan_config(hdev); ret = hclgevf_init_vlan_config(hdev);
if (ret) { if (ret) {
dev_err(&hdev->pdev->dev, dev_err(&hdev->pdev->dev,
...@@ -2846,6 +3134,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) ...@@ -2846,6 +3134,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
hclgevf_pci_uninit(hdev); hclgevf_pci_uninit(hdev);
hclgevf_cmd_uninit(hdev); hclgevf_cmd_uninit(hdev);
hclgevf_uninit_mac_list(hdev);
} }
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
...@@ -3213,6 +3502,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { ...@@ -3213,6 +3502,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.set_timer_task = hclgevf_set_timer_task, .set_timer_task = hclgevf_set_timer_task,
.get_link_mode = hclgevf_get_link_mode, .get_link_mode = hclgevf_get_link_mode,
.set_promisc_mode = hclgevf_set_promisc_mode, .set_promisc_mode = hclgevf_set_promisc_mode,
.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
}; };
static struct hnae3_ae_algo ae_algovf = { static struct hnae3_ae_algo ae_algovf = {
......
...@@ -148,6 +148,7 @@ enum hclgevf_states { ...@@ -148,6 +148,7 @@ enum hclgevf_states {
HCLGEVF_STATE_MBX_HANDLING, HCLGEVF_STATE_MBX_HANDLING,
HCLGEVF_STATE_CMD_DISABLE, HCLGEVF_STATE_CMD_DISABLE,
HCLGEVF_STATE_LINK_UPDATING, HCLGEVF_STATE_LINK_UPDATING,
HCLGEVF_STATE_PROMISC_CHANGED,
HCLGEVF_STATE_RST_FAIL, HCLGEVF_STATE_RST_FAIL,
}; };
...@@ -234,6 +235,29 @@ struct hclgevf_rst_stats { ...@@ -234,6 +235,29 @@ struct hclgevf_rst_stats {
u32 rst_fail_cnt; /* the number of VF reset fail */ u32 rst_fail_cnt; /* the number of VF reset fail */
}; };
enum HCLGEVF_MAC_ADDR_TYPE {
HCLGEVF_MAC_ADDR_UC,
HCLGEVF_MAC_ADDR_MC
};
enum HCLGEVF_MAC_NODE_STATE {
HCLGEVF_MAC_TO_ADD,
HCLGEVF_MAC_TO_DEL,
HCLGEVF_MAC_ACTIVE
};
struct hclgevf_mac_addr_node {
struct list_head node;
enum HCLGEVF_MAC_NODE_STATE state;
u8 mac_addr[ETH_ALEN];
};
struct hclgevf_mac_table_cfg {
spinlock_t mac_list_lock; /* protect the mac addresses to be added/deleted */
struct list_head uc_mac_list;
struct list_head mc_mac_list;
};
struct hclgevf_dev { struct hclgevf_dev {
struct pci_dev *pdev; struct pci_dev *pdev;
struct hnae3_ae_dev *ae_dev; struct hnae3_ae_dev *ae_dev;
...@@ -282,6 +306,8 @@ struct hclgevf_dev { ...@@ -282,6 +306,8 @@ struct hclgevf_dev {
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
struct hclgevf_mac_table_cfg mac_table;
bool mbx_event_pending; bool mbx_event_pending;
struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
......