Commit e2664f97 authored by David S. Miller

Merge branch 'hns3-misc-fixes'

Salil Mehta says:

====================
Misc. bug fixes & cleanups for HNS3 driver

This patch-set presents some miscellaneous bug fixes and cleanups
for the HNS3 driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4fed38cf 8d40854f
...@@ -239,7 +239,28 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) ...@@ -239,7 +239,28 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo; struct hnae3_knic_private_info *kinfo = &h->kinfo;
unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
int ret; int i, ret;
if (kinfo->num_tc <= 1) {
netdev_reset_tc(netdev);
} else {
ret = netdev_set_num_tc(netdev, kinfo->num_tc);
if (ret) {
netdev_err(netdev,
"netdev_set_num_tc fail, ret=%d!\n", ret);
return ret;
}
for (i = 0; i < HNAE3_MAX_TC; i++) {
if (!kinfo->tc_info[i].enable)
continue;
netdev_set_tc_queue(netdev,
kinfo->tc_info[i].tc,
kinfo->tc_info[i].tqp_count,
kinfo->tc_info[i].tqp_offset);
}
}
ret = netif_set_real_num_tx_queues(netdev, queue_size); ret = netif_set_real_num_tx_queues(netdev, queue_size);
if (ret) { if (ret) {
...@@ -312,7 +333,9 @@ static int hns3_nic_net_up(struct net_device *netdev) ...@@ -312,7 +333,9 @@ static int hns3_nic_net_up(struct net_device *netdev)
static int hns3_nic_net_open(struct net_device *netdev) static int hns3_nic_net_open(struct net_device *netdev)
{ {
struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret; struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo;
int i, ret;
netif_carrier_off(netdev); netif_carrier_off(netdev);
...@@ -327,6 +350,12 @@ static int hns3_nic_net_open(struct net_device *netdev) ...@@ -327,6 +350,12 @@ static int hns3_nic_net_open(struct net_device *netdev)
return ret; return ret;
} }
kinfo = &h->kinfo;
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
netdev_set_prio_tc_map(netdev, i,
kinfo->prio_tc[i]);
}
priv->ae_handle->last_reset_time = jiffies; priv->ae_handle->last_reset_time = jiffies;
return 0; return 0;
} }
...@@ -762,16 +791,14 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, ...@@ -762,16 +791,14 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
*/ */
if (skb_is_gso(skb)) if (skb_is_gso(skb))
hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
} else if (l3.v6->version == 6) { } else if (l3.v6->version == 6) {
hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
HNS3_TXD_L3T_S, HNS3_L3T_IPV6); HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
} }
switch (l4_proto) { switch (l4_proto) {
case IPPROTO_TCP: case IPPROTO_TCP:
hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hnae3_set_field(*type_cs_vlan_tso, hnae3_set_field(*type_cs_vlan_tso,
HNS3_TXD_L4T_M, HNS3_TXD_L4T_M,
HNS3_TXD_L4T_S, HNS3_TXD_L4T_S,
...@@ -781,12 +808,14 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, ...@@ -781,12 +808,14 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
if (hns3_tunnel_csum_bug(skb)) if (hns3_tunnel_csum_bug(skb))
break; break;
hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hnae3_set_field(*type_cs_vlan_tso, hnae3_set_field(*type_cs_vlan_tso,
HNS3_TXD_L4T_M, HNS3_TXD_L4T_M,
HNS3_TXD_L4T_S, HNS3_TXD_L4T_S,
HNS3_L4T_UDP); HNS3_L4T_UDP);
break; break;
case IPPROTO_SCTP: case IPPROTO_SCTP:
hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hnae3_set_field(*type_cs_vlan_tso, hnae3_set_field(*type_cs_vlan_tso,
HNS3_TXD_L4T_M, HNS3_TXD_L4T_M,
HNS3_TXD_L4T_S, HNS3_TXD_L4T_S,
...@@ -1307,7 +1336,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) ...@@ -1307,7 +1336,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
u16 mode = mqprio_qopt->mode; u16 mode = mqprio_qopt->mode;
u8 hw = mqprio_qopt->qopt.hw; u8 hw = mqprio_qopt->qopt.hw;
bool if_running; bool if_running;
unsigned int i;
int ret; int ret;
if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
...@@ -1331,24 +1359,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) ...@@ -1331,24 +1359,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
if (ret) if (ret)
goto out; goto out;
if (tc <= 1) {
netdev_reset_tc(netdev);
} else {
ret = netdev_set_num_tc(netdev, tc);
if (ret)
goto out;
for (i = 0; i < HNAE3_MAX_TC; i++) {
if (!kinfo->tc_info[i].enable)
continue;
netdev_set_tc_queue(netdev,
kinfo->tc_info[i].tc,
kinfo->tc_info[i].tqp_count,
kinfo->tc_info[i].tqp_offset);
}
}
ret = hns3_nic_set_real_num_queue(netdev); ret = hns3_nic_set_real_num_queue(netdev);
out: out:
...@@ -3202,7 +3212,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) ...@@ -3202,7 +3212,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
struct net_device *ndev = kinfo->netdev; struct net_device *ndev = kinfo->netdev;
bool if_running; bool if_running;
int ret; int ret;
u8 i;
if (tc > HNAE3_MAX_TC) if (tc > HNAE3_MAX_TC)
return -EINVAL; return -EINVAL;
...@@ -3212,10 +3221,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) ...@@ -3212,10 +3221,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
if_running = netif_running(ndev); if_running = netif_running(ndev);
ret = netdev_set_num_tc(ndev, tc);
if (ret)
return ret;
if (if_running) { if (if_running) {
(void)hns3_nic_net_stop(ndev); (void)hns3_nic_net_stop(ndev);
msleep(100); msleep(100);
...@@ -3226,27 +3231,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) ...@@ -3226,27 +3231,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
if (ret) if (ret)
goto err_out; goto err_out;
if (tc <= 1) {
netdev_reset_tc(ndev);
goto out;
}
for (i = 0; i < HNAE3_MAX_TC; i++) {
struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
if (tc_info->enable)
netdev_set_tc_queue(ndev,
tc_info->tc,
tc_info->tqp_count,
tc_info->tqp_offset);
}
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
netdev_set_prio_tc_map(ndev, i,
kinfo->prio_tc[i]);
}
out:
ret = hns3_nic_set_real_num_queue(ndev); ret = hns3_nic_set_real_num_queue(ndev);
err_out: err_out:
......
...@@ -201,7 +201,9 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget) ...@@ -201,7 +201,9 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
rx_group = &ring->tqp_vector->rx_group; rx_group = &ring->tqp_vector->rx_group;
pre_rx_pkt = rx_group->total_packets; pre_rx_pkt = rx_group->total_packets;
preempt_disable();
hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data); hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data);
preempt_enable();
rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt); rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt);
rx_group->total_packets = pre_rx_pkt; rx_group->total_packets = pre_rx_pkt;
......
...@@ -206,7 +206,8 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) ...@@ -206,7 +206,8 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock); spin_lock_bh(&hw->cmq.csq.lock);
if (num > hclge_ring_space(&hw->cmq.csq)) { if (num > hclge_ring_space(&hw->cmq.csq) ||
test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
spin_unlock_bh(&hw->cmq.csq.lock); spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY; return -EBUSY;
} }
...@@ -346,6 +347,7 @@ int hclge_cmd_init(struct hclge_dev *hdev) ...@@ -346,6 +347,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
spin_lock_init(&hdev->hw.cmq.crq.lock); spin_lock_init(&hdev->hw.cmq.crq.lock);
hclge_cmd_init_regs(&hdev->hw); hclge_cmd_init_regs(&hdev->hw);
clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) { if (ret) {
......
...@@ -571,7 +571,8 @@ struct hclge_config_auto_neg_cmd { ...@@ -571,7 +571,8 @@ struct hclge_config_auto_neg_cmd {
struct hclge_config_max_frm_size_cmd { struct hclge_config_max_frm_size_cmd {
__le16 max_frm_size; __le16 max_frm_size;
u8 rsv[22]; u8 min_frm_size;
u8 rsv[21];
}; };
enum hclge_mac_vlan_tbl_opcode { enum hclge_mac_vlan_tbl_opcode {
......
...@@ -1834,8 +1834,6 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, ...@@ -1834,8 +1834,6 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
return 0; return 0;
} }
#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0)
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc) struct hclge_pkt_buf_alloc *buf_alloc)
{ {
...@@ -1863,13 +1861,11 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, ...@@ -1863,13 +1861,11 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
req->tc_wl[j].high = req->tc_wl[j].high =
cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
req->tc_wl[j].high |= req->tc_wl[j].high |=
cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) << cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
HCLGE_RX_PRIV_EN_B);
req->tc_wl[j].low = req->tc_wl[j].low =
cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
req->tc_wl[j].low |= req->tc_wl[j].low |=
cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) << cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
HCLGE_RX_PRIV_EN_B);
} }
} }
...@@ -1911,13 +1907,11 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev, ...@@ -1911,13 +1907,11 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
req->com_thrd[j].high = req->com_thrd[j].high =
cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
req->com_thrd[j].high |= req->com_thrd[j].high |=
cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) << cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
HCLGE_RX_PRIV_EN_B);
req->com_thrd[j].low = req->com_thrd[j].low =
cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
req->com_thrd[j].low |= req->com_thrd[j].low |=
cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) << cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
HCLGE_RX_PRIV_EN_B);
} }
} }
...@@ -1943,14 +1937,10 @@ static int hclge_common_wl_config(struct hclge_dev *hdev, ...@@ -1943,14 +1937,10 @@ static int hclge_common_wl_config(struct hclge_dev *hdev,
req = (struct hclge_rx_com_wl *)desc.data; req = (struct hclge_rx_com_wl *)desc.data;
req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
req->com_wl.high |= req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) <<
HCLGE_RX_PRIV_EN_B);
req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
req->com_wl.low |= req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) <<
HCLGE_RX_PRIV_EN_B);
ret = hclge_cmd_send(&hdev->hw, &desc, 1); ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) { if (ret) {
...@@ -2517,12 +2507,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) ...@@ -2517,12 +2507,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
/* check for vector0 reset event sources */ /* check for vector0 reset event sources */
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST; return HCLGE_VECTOR0_EVENT_RST;
} }
if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST; return HCLGE_VECTOR0_EVENT_RST;
...@@ -2815,8 +2807,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) ...@@ -2815,8 +2807,6 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
break; break;
default: default:
dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d",
hdev->reset_type);
break; break;
} }
...@@ -4997,6 +4987,7 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) ...@@ -4997,6 +4987,7 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
req = (struct hclge_config_max_frm_size_cmd *)desc.data; req = (struct hclge_config_max_frm_size_cmd *)desc.data;
req->max_frm_size = cpu_to_le16(max_frm_size); req->max_frm_size = cpu_to_le16(max_frm_size);
req->min_frm_size = HCLGE_MAC_MIN_FRAME;
ret = hclge_cmd_send(&hdev->hw, &desc, 1); ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) { if (ret) {
......
...@@ -128,6 +128,7 @@ enum HCLGE_DEV_STATE { ...@@ -128,6 +128,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_MBX_SERVICE_SCHED, HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING, HCLGE_STATE_MBX_HANDLING,
HCLGE_STATE_STATISTICS_UPDATING, HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_MAX HCLGE_STATE_MAX
}; };
......
...@@ -1223,6 +1223,10 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) ...@@ -1223,6 +1223,10 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
tx_en = true; tx_en = true;
rx_en = true; rx_en = true;
break; break;
case HCLGE_FC_PFC:
tx_en = false;
rx_en = false;
break;
default: default:
tx_en = true; tx_en = true;
rx_en = true; rx_en = true;
...@@ -1240,8 +1244,9 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) ...@@ -1240,8 +1244,9 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
if (ret) if (ret)
return ret; return ret;
if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) ret = hclge_mac_pause_setup_hw(hdev);
return hclge_mac_pause_setup_hw(hdev); if (ret)
return ret;
/* Only DCB-supported dev supports qset back pressure and pfc cmd */ /* Only DCB-supported dev supports qset back pressure and pfc cmd */
if (!hnae3_dev_dcb_supported(hdev)) if (!hnae3_dev_dcb_supported(hdev))
......
...@@ -658,8 +658,17 @@ static int hclgevf_unmap_ring_from_vector( ...@@ -658,8 +658,17 @@ static int hclgevf_unmap_ring_from_vector(
static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{ {
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int vector_id;
vector_id = hclgevf_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&handle->pdev->dev,
"hclgevf_put_vector get vector index fail. ret =%d\n",
vector_id);
return vector_id;
}
hclgevf_free_vector(hdev, vector); hclgevf_free_vector(hdev, vector_id);
return 0; return 0;
} }
......
...@@ -208,7 +208,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) ...@@ -208,7 +208,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
/* tail the async message in arq */ /* tail the async message in arq */
msg_q = hdev->arq.msg_q[hdev->arq.tail]; msg_q = hdev->arq.msg_q[hdev->arq.tail];
memcpy(&msg_q[0], req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE); memcpy(&msg_q[0], req->msg,
HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
hclge_mbx_tail_ptr_move_arq(hdev->arq); hclge_mbx_tail_ptr_move_arq(hdev->arq);
hdev->arq.count++; hdev->arq.count++;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment