Commit 5d64075c authored by David S. Miller

Merge branch 'hns3-fixes'

Jijie Shao says:

====================
There are some bugfixes for the HNS3 ethernet driver.

---
ChangeLog:
v1 -> v2:
  - "net: hns3: fix add VLAN fail issue" and "net: hns3: fix VF reset fail issue"
    are modified as suggested by Paolo
  v1: https://lore.kernel.org/all/20231028025917.314305-1-shaojijie@huawei.com/
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2bd5b559 dff655e8
@@ -503,11 +503,14 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
         }
 
         sprintf(result[j++], "%d", i);
-        sprintf(result[j++], "%s", dim_state_str[dim->state]);
+        sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
+                dim_state_str[dim->state] : "unknown");
         sprintf(result[j++], "%u", dim->profile_ix);
-        sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]);
+        sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
+                dim_cqe_mode_str[dim->mode] : "unknown");
         sprintf(result[j++], "%s",
-                dim_tune_stat_str[dim->tune_state]);
+                dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
+                dim_tune_stat_str[dim->tune_state] : "unknown");
         sprintf(result[j++], "%u", dim->steps_left);
         sprintf(result[j++], "%u", dim->steps_right);
         sprintf(result[j++], "%u", dim->tired);
...
@@ -5139,7 +5139,7 @@ static int hns3_init_mac_addr(struct net_device *netdev)
         struct hns3_nic_priv *priv = netdev_priv(netdev);
         char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
         struct hnae3_handle *h = priv->ae_handle;
-        u8 mac_addr_temp[ETH_ALEN];
+        u8 mac_addr_temp[ETH_ALEN] = {0};
         int ret = 0;
 
         if (h->ae_algo->ops->get_mac_addr)
...
@@ -61,6 +61,7 @@ static void hclge_sync_fd_table(struct hclge_dev *hdev);
 static void hclge_update_fec_stats(struct hclge_dev *hdev);
 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
                                       int wait_cnt);
+static int hclge_update_port_info(struct hclge_dev *hdev);
 
 static struct hnae3_ae_algo ae_algo;
@@ -3041,6 +3042,9 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
 
         if (state != hdev->hw.mac.link) {
                 hdev->hw.mac.link = state;
+                if (state == HCLGE_LINK_STATUS_UP)
+                        hclge_update_port_info(hdev);
+
                 client->ops->link_status_change(handle, state);
                 hclge_config_mac_tnl_int(hdev, state);
                 if (rclient && rclient->ops->link_status_change)
@@ -10025,8 +10029,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
         struct hclge_vport_vlan_cfg *vlan, *tmp;
         struct hclge_dev *hdev = vport->back;
 
-        mutex_lock(&hdev->vport_lock);
-
         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
                 if (vlan->vlan_id == vlan_id) {
                         if (is_write_tbl && vlan->hd_tbl_status)
@@ -10041,8 +10043,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
                         break;
                 }
         }
-
-        mutex_unlock(&hdev->vport_lock);
 }
 
 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
@@ -10451,11 +10451,16 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
          * handle mailbox. Just record the vlan id, and remove it after
          * reset finished.
          */
+        mutex_lock(&hdev->vport_lock);
         if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
              test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
+                mutex_unlock(&hdev->vport_lock);
                 return -EBUSY;
+        } else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) {
+                clear_bit(vlan_id, vport->vlan_del_fail_bmap);
         }
+        mutex_unlock(&hdev->vport_lock);
 
         /* when port base vlan enabled, we use port base vlan as the vlan
          * filter entry. In this case, we don't update vlan filter table
@@ -10470,17 +10475,22 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
         }
 
         if (!ret) {
-                if (!is_kill)
+                if (!is_kill) {
                         hclge_add_vport_vlan_table(vport, vlan_id,
                                                    writen_to_tbl);
-                else if (is_kill && vlan_id != 0)
+                } else if (is_kill && vlan_id != 0) {
+                        mutex_lock(&hdev->vport_lock);
                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
+                        mutex_unlock(&hdev->vport_lock);
+                }
         } else if (is_kill) {
                 /* when remove hw vlan filter failed, record the vlan id,
                  * and try to remove it from hw later, to be consistence
                  * with stack
                  */
+                mutex_lock(&hdev->vport_lock);
                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
+                mutex_unlock(&hdev->vport_lock);
         }
 
         hclge_set_vport_vlan_fltr_change(vport);
@@ -10520,6 +10530,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
         int i, ret, sync_cnt = 0;
         u16 vlan_id;
 
+        mutex_lock(&hdev->vport_lock);
         /* start from vport 1 for PF is always alive */
         for (i = 0; i < hdev->num_alloc_vport; i++) {
                 struct hclge_vport *vport = &hdev->vport[i];
@@ -10530,21 +10541,26 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
                                                        vport->vport_id, vlan_id,
                                                        true);
-                        if (ret && ret != -EINVAL)
+                        if (ret && ret != -EINVAL) {
+                                mutex_unlock(&hdev->vport_lock);
                                 return;
+                        }
 
                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
                         hclge_set_vport_vlan_fltr_change(vport);
 
                         sync_cnt++;
-                        if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
+                        if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) {
+                                mutex_unlock(&hdev->vport_lock);
                                 return;
+                        }
 
                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
                                                  VLAN_N_VID);
                 }
         }
+        mutex_unlock(&hdev->vport_lock);
 
         hclge_sync_vlan_fltr_state(hdev);
 }
@@ -11651,6 +11667,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
                 goto err_msi_irq_uninit;
 
         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
+                clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
                 if (hnae3_dev_phy_imp_supported(hdev))
                         ret = hclge_update_tp_port_info(hdev);
                 else
...
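Note on the PF-side VLAN hunks above: they all revolve around one scheme. A hardware VLAN delete that cannot be applied right now (for example during reset) is recorded in vlan_del_fail_bmap under vport_lock, and a periodic sync routine later retries the recorded entries under the same lock. What follows is a minimal, self-contained sketch of that scheme, not the driver's code; the bitmap, the lock and the hw_del_vlan() stub are illustrative stand-ins.

/* Sketch only: "record failed deletes, retry later" under one lock.
 * All names below are stand-ins, not the driver's symbols.
 */
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/mutex.h>

static DECLARE_BITMAP(vlan_del_fail_bmap, VLAN_N_VID);  /* pending deletes */
static DEFINE_MUTEX(vport_lock);                        /* protects the bitmap */

/* stand-in for the mailbox/hardware delete, which may fail transiently */
static int hw_del_vlan(u16 vlan_id)
{
        return 0;
}

/* caller path: remember a delete that could not be applied right now */
static void record_failed_del(u16 vlan_id)
{
        mutex_lock(&vport_lock);
        set_bit(vlan_id, vlan_del_fail_bmap);
        mutex_unlock(&vport_lock);
}

/* periodic sync path: retry everything recorded, under the same lock */
static void sync_failed_dels(void)
{
        u16 vlan_id;

        mutex_lock(&vport_lock);
        vlan_id = find_first_bit(vlan_del_fail_bmap, VLAN_N_VID);
        while (vlan_id != VLAN_N_VID) {
                if (hw_del_vlan(vlan_id))
                        break;          /* leave the bit set, retry next pass */
                clear_bit(vlan_id, vlan_del_fail_bmap);
                vlan_id = find_first_bit(vlan_del_fail_bmap, VLAN_N_VID);
        }
        mutex_unlock(&vport_lock);
}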
@@ -1206,6 +1206,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
             test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
                 set_bit(vlan_id, hdev->vlan_del_fail_bmap);
                 return -EBUSY;
+        } else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) {
+                clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
         }
 
         hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
@@ -1233,20 +1235,25 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
         int ret, sync_cnt = 0;
         u16 vlan_id;
 
+        if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID))
+                return;
+
+        rtnl_lock();
         vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
         while (vlan_id != VLAN_N_VID) {
                 ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
                                               vlan_id, true);
                 if (ret)
-                        return;
+                        break;
 
                 clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
                 sync_cnt++;
                 if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
-                        return;
+                        break;
 
                 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
         }
+        rtnl_unlock();
 }
 
 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
@@ -1974,8 +1981,18 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
         return HCLGEVF_VECTOR0_EVENT_OTHER;
 }
 
+static void hclgevf_reset_timer(struct timer_list *t)
+{
+        struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
+
+        hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
+        hclgevf_reset_task_schedule(hdev);
+}
+
 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
 {
+#define HCLGEVF_RESET_DELAY        5
+
         enum hclgevf_evt_cause event_cause;
         struct hclgevf_dev *hdev = data;
         u32 clearval;
@@ -1987,7 +2004,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
 
         switch (event_cause) {
         case HCLGEVF_VECTOR0_EVENT_RST:
-                hclgevf_reset_task_schedule(hdev);
+                mod_timer(&hdev->reset_timer,
+                          jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
                 break;
         case HCLGEVF_VECTOR0_EVENT_MBX:
                 hclgevf_mbx_handler(hdev);
@@ -2930,6 +2948,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
                  HCLGEVF_DRIVER_NAME);
 
         hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
+        timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
 
         return 0;
...
@@ -219,6 +219,7 @@ struct hclgevf_dev {
         enum hnae3_reset_type reset_level;
         unsigned long reset_pending;
         enum hnae3_reset_type reset_type;
+        struct timer_list reset_timer;
 
 #define HCLGEVF_RESET_REQUESTED                0
 #define HCLGEVF_RESET_PENDING                  1
...
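Note on the VF reset hunks above: they defer reset handling from the misc IRQ to a short one-shot timer. The handler only arms hdev->reset_timer, and the timer callback clears the event cause and schedules the reset task. Below is a standalone sketch of that kernel timer pattern under assumed names (demo_dev, schedule_reset_task(), RESET_DELAY_MS); it is not the driver code itself.

/* Sketch only: timer_setup()/mod_timer()/from_timer() used to defer work
 * out of an interrupt handler. Names below are illustrative stand-ins.
 */
#include <linux/jiffies.h>
#include <linux/timer.h>

#define RESET_DELAY_MS  5       /* assumed delay, mirrors HCLGEVF_RESET_DELAY */

struct demo_dev {
        struct timer_list reset_timer;
};

static void schedule_reset_task(struct demo_dev *ddev)
{
        /* stand-in for queueing the real reset work */
}

static void demo_reset_timer(struct timer_list *t)
{
        /* recover the containing structure from the embedded timer_list */
        struct demo_dev *ddev = from_timer(ddev, t, reset_timer);

        schedule_reset_task(ddev);
}

static void demo_init(struct demo_dev *ddev)
{
        timer_setup(&ddev->reset_timer, demo_reset_timer, 0);
}

/* IRQ path: only arm the one-shot timer; re-arming just pushes expiry out */
static void demo_irq_saw_reset_event(struct demo_dev *ddev)
{
        mod_timer(&ddev->reset_timer,
                  jiffies + msecs_to_jiffies(RESET_DELAY_MS));
}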
@@ -63,6 +63,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
                 i++;
         }
 
+        /* ensure additional_info will be seen after received_resp */
+        smp_rmb();
+
         if (i >= HCLGEVF_MAX_TRY_TIMES) {
                 dev_err(&hdev->pdev->dev,
                         "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
@@ -178,6 +181,10 @@ static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
         resp->resp_status = hclgevf_resp_to_errno(resp_status);
         memcpy(resp->additional_info, req->msg.resp_data,
                HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
+
+        /* ensure additional_info will be seen before setting received_resp */
+        smp_wmb();
+
         if (match_id) {
                 /* If match_id is not zero, it means PF support match_id.
                  * if the match_id is right, VF get the right response, or
...
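Note on the mailbox hunks above: they pair a write barrier on the producer side with a read barrier on the consumer side, so that additional_info is published before received_resp is set and is read only after received_resp has been observed. A minimal sketch of that pairing is shown below, with assumed structure and field names (demo_resp, publish_resp(), consume_resp()); it mirrors the pattern only, not the driver's actual mailbox code.

/* Sketch only: smp_wmb()/smp_rmb() pairing. The writer fills the payload,
 * issues a write barrier, then sets the flag; the reader checks the flag,
 * issues a read barrier, then reads the payload.
 */
#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_resp {
        u8 additional_info[8];
        bool received_resp;
};

/* producer side (e.g. the mailbox reply handler) */
static void publish_resp(struct demo_resp *resp, const u8 *data, size_t len)
{
        memcpy(resp->additional_info, data, len);

        /* make the payload visible before the flag that announces it */
        smp_wmb();
        WRITE_ONCE(resp->received_resp, true);
}

/* consumer side (e.g. the caller polling for the reply) */
static bool consume_resp(struct demo_resp *resp, u8 *out, size_t len)
{
        if (!READ_ONCE(resp->received_resp))
                return false;

        /* pairs with smp_wmb() above: don't read the payload too early */
        smp_rmb();
        memcpy(out, resp->additional_info, len);
        return true;
}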