Commit a68a8481 authored by David S. Miller

Merge branch 'hns3-next'

Huazhong Tan says:

====================
code optimizations & bugfixes for HNS3 driver

This patchset includes bugfixes and code optimizations for the HNS3
Ethernet controller driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents beb73559 54a395b6
@@ -40,6 +40,8 @@ enum HCLGE_MBX_OPCODE {
 	HCLGE_MBX_SET_ALIVE,		/* (VF -> PF) set alive state */
 	HCLGE_MBX_SET_MTU,		/* (VF -> PF) set mtu */
 	HCLGE_MBX_GET_QID_IN_PF,	/* (VF -> PF) get queue id in pf */
+	HCLGE_MBX_LINK_STAT_MODE,	/* (PF -> VF) link mode has changed */
+	HCLGE_MBX_GET_LINK_MODE,	/* (VF -> PF) get the link mode of pf */
 };

 /* below are per-VF mac-vlan subcodes */
@@ -60,7 +62,7 @@ enum hclge_mbx_vlan_cfg_subcode {
 };

 #define HCLGE_MBX_MAX_MSG_SIZE		16
-#define HCLGE_MBX_MAX_RESP_DATA_SIZE	8
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE	16
 #define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM	3
 #define HCLGE_MBX_RING_NODE_VARIABLE_NUM	3
...
@@ -461,7 +461,7 @@ struct hnae3_ae_ops {
 	bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
 	bool (*ae_dev_resetting)(struct hnae3_handle *handle);
 	unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle);
-	int (*set_gro_en)(struct hnae3_handle *handle, int enable);
+	int (*set_gro_en)(struct hnae3_handle *handle, bool enable);
 	u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id);
 	void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
 	int (*mac_connect_phy)(struct hnae3_handle *handle);
...
@@ -1349,6 +1349,7 @@ static int hns3_nic_set_features(struct net_device *netdev,
 	netdev_features_t changed = netdev->features ^ features;
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	struct hnae3_handle *h = priv->ae_handle;
+	bool enable;
 	int ret;

 	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
@@ -1359,38 +1360,29 @@ static int hns3_nic_set_features(struct net_device *netdev,
 	}

 	if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
-		if (features & NETIF_F_GRO_HW)
-			ret = h->ae_algo->ops->set_gro_en(h, true);
-		else
-			ret = h->ae_algo->ops->set_gro_en(h, false);
+		enable = !!(features & NETIF_F_GRO_HW);
+		ret = h->ae_algo->ops->set_gro_en(h, enable);
 		if (ret)
 			return ret;
 	}

 	if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
 	    h->ae_algo->ops->enable_vlan_filter) {
-		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
-			h->ae_algo->ops->enable_vlan_filter(h, true);
-		else
-			h->ae_algo->ops->enable_vlan_filter(h, false);
+		enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+		h->ae_algo->ops->enable_vlan_filter(h, enable);
 	}

 	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
 	    h->ae_algo->ops->enable_hw_strip_rxvtag) {
-		if (features & NETIF_F_HW_VLAN_CTAG_RX)
-			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
-		else
-			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
+		enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
+		ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
 		if (ret)
 			return ret;
 	}

 	if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
-		if (features & NETIF_F_NTUPLE)
-			h->ae_algo->ops->enable_fd(h, true);
-		else
-			h->ae_algo->ops->enable_fd(h, false);
+		enable = !!(features & NETIF_F_NTUPLE);
+		h->ae_algo->ops->enable_fd(h, enable);
 	}

 	netdev->features = features;
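A note on the pattern this hunk standardizes (and the reason `set_gro_en` moves from `int` to `bool` in hnae3.h above): `netdev_features_t` is a 64-bit mask, so narrowing `features & FLAG` through an `int` parameter can silently truncate a high feature bit to zero. The `!!` idiom collapses any non-zero mask to exactly 1 before it reaches a `bool` argument. A minimal userspace sketch of the idiom, using a stand-in bit position rather than the kernel's real NETIF_F_GRO_HW:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a high feature bit such as NETIF_F_GRO_HW; the real
 * bit position lives in the kernel's netdev_features.h.
 */
#define FAKE_F_GRO_HW (1ULL << 46)

int main(void)
{
	unsigned long long features = FAKE_F_GRO_HW;

	/* (int)(features & FAKE_F_GRO_HW) would truncate to 0 here;
	 * !! collapses the non-zero mask to exactly 1 instead.
	 */
	bool enable = !!(features & FAKE_F_GRO_HW);

	printf("enable = %d\n", enable);	/* prints: enable = 1 */
	return 0;
}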
@@ -1937,8 +1929,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
 		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;

 	if (pdev->revision >= 0x21) {
-		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER |
-			NETIF_F_GRO_HW;
+		netdev->hw_features |= NETIF_F_GRO_HW;
 		netdev->features |= NETIF_F_GRO_HW;

 		if (!(h->flags & HNAE3_SUPPORT_VF)) {
@@ -2777,7 +2768,7 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
 	u32 time_passed_ms;
 	u16 new_int_gl;

-	if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
+	if (!tqp_vector->last_jiffies)
 		return false;

 	if (ring_group->total_packets == 0) {
@@ -2880,7 +2871,7 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
 	}

 	if (tx_group->coal.gl_adapt_enable) {
-		tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
+		tx_update = hns3_get_new_int_gl(tx_group);
 		if (tx_update)
 			hns3_set_vector_coalesce_tx_gl(tqp_vector,
 						       tx_group->coal.int_gl);
...
@@ -186,6 +186,38 @@ static bool hclge_is_special_opcode(u16 opcode)
 	return false;
 }

+static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
+				  int num, int ntc)
+{
+	u16 opcode, desc_ret;
+	int handle;
+	int retval;
+
+	opcode = le16_to_cpu(desc[0].opcode);
+	for (handle = 0; handle < num; handle++) {
+		desc[handle] = hw->cmq.csq.desc[ntc];
+		ntc++;
+		if (ntc >= hw->cmq.csq.desc_num)
+			ntc = 0;
+	}
+	if (likely(!hclge_is_special_opcode(opcode)))
+		desc_ret = le16_to_cpu(desc[num - 1].retval);
+	else
+		desc_ret = le16_to_cpu(desc[0].retval);
+
+	if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
+		retval = 0;
+	else if (desc_ret == HCLGE_CMD_NO_AUTH)
+		retval = -EPERM;
+	else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED)
+		retval = -EOPNOTSUPP;
+	else
+		retval = -EIO;
+	hw->cmq.last_status = desc_ret;
+
+	return retval;
+}
+
 /**
  * hclge_cmd_send - send command to command queue
  * @hw: pointer to the hw struct
@@ -203,7 +235,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 	u32 timeout = 0;
 	int handle = 0;
 	int retval = 0;
-	u16 opcode, desc_ret;
 	int ntc;

 	spin_lock_bh(&hw->cmq.csq.lock);
@@ -219,12 +250,11 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 	 * which will be use for hardware to write back
 	 */
 	ntc = hw->cmq.csq.next_to_use;
-	opcode = le16_to_cpu(desc[0].opcode);
 	while (handle < num) {
 		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
 		*desc_to_use = desc[handle];
 		(hw->cmq.csq.next_to_use)++;
-		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
 			hw->cmq.csq.next_to_use = 0;
 		handle++;
 	}
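The `==` to `>=` change above is defensive: if `next_to_use` ever landed past `desc_num` through some other bug, an equality test would never wrap it back and the index would walk off the descriptor ring. A driver-independent sketch of the same guard:

/* Illustrative ring-index advance, not driver code: the >= guard
 * recovers even if idx somehow lands past the end, where == would
 * wrap only on exact equality.
 */
static inline int ring_advance(int idx, int ring_size)
{
	idx++;
	if (idx >= ring_size)
		idx = 0;
	return idx;
}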
@@ -250,31 +280,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 	if (!complete) {
 		retval = -EAGAIN;
 	} else {
-		handle = 0;
-		while (handle < num) {
-			/* Get the result of hardware write back */
-			desc_to_use = &hw->cmq.csq.desc[ntc];
-			desc[handle] = *desc_to_use;
-
-			if (likely(!hclge_is_special_opcode(opcode)))
-				desc_ret = le16_to_cpu(desc[handle].retval);
-			else
-				desc_ret = le16_to_cpu(desc[0].retval);
-
-			if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
-				retval = 0;
-			else if (desc_ret == HCLGE_CMD_NO_AUTH)
-				retval = -EPERM;
-			else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED)
-				retval = -EOPNOTSUPP;
-			else
-				retval = -EIO;
-			hw->cmq.last_status = desc_ret;
-			ntc++;
-			handle++;
-			if (ntc == hw->cmq.csq.desc_num)
-				ntc = 0;
-		}
+		retval = hclge_cmd_check_retval(hw, desc, num, ntc);
 	}

 	/* Clean the command send queue */
...
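With write-back checking factored into `hclge_cmd_check_retval()`, `hclge_cmd_send()` now only queues descriptors and waits for completion. A sketch of a typical caller, assuming a chained two-descriptor read; `SOME_OPCODE` is illustrative, not a real firmware opcode:

struct hclge_desc desc[2];
int ret;

hclge_cmd_setup_basic_desc(&desc[0], SOME_OPCODE, true /* is_read */);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);	/* chain to desc[1] */
hclge_cmd_setup_basic_desc(&desc[1], SOME_OPCODE, true);

ret = hclge_cmd_send(hw, desc, 2);
if (ret)	/* errno mapped from the write-back by hclge_cmd_check_retval() */
	return ret;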
@@ -312,16 +312,16 @@ struct hclge_ctrl_vector_chain_cmd {
 	u8 rsv;
 };

-#define HCLGE_TC_NUM		8
+#define HCLGE_MAX_TC_NUM	8
 #define HCLGE_TC0_PRI_BUF_EN_B	15 /* Bit 15 indicate enable or not */
 #define HCLGE_BUF_UNIT_S	7  /* Buf size is united by 128 bytes */
 struct hclge_tx_buff_alloc_cmd {
-	__le16 tx_pkt_buff[HCLGE_TC_NUM];
+	__le16 tx_pkt_buff[HCLGE_MAX_TC_NUM];
 	u8 tx_buff_rsv[8];
 };

 struct hclge_rx_priv_buff_cmd {
-	__le16 buf_num[HCLGE_TC_NUM];
+	__le16 buf_num[HCLGE_MAX_TC_NUM];
 	__le16 shared_buf;
 	u8 rsv[6];
 };

@@ -367,7 +367,6 @@ struct hclge_priv_buf {
 	u32 enable;	/* Enable TC private buffer or not */
 };

-#define HCLGE_MAX_TC_NUM	8
 struct hclge_shared_buf {
 	struct hclge_waterline self;
 	struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM];
...
@@ -691,7 +691,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
 	dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");

 	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
-	for (i = 0; i < HCLGE_TC_NUM; i++)
+	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
 		dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
 			 tx_buf_cmd->tx_pkt_buff[i]);

@@ -703,7 +703,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
 	dev_info(&hdev->pdev->dev, "\n");

 	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
-	for (i = 0; i < HCLGE_TC_NUM; i++)
+	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
 		dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
 			 rx_buf_cmd->buf_num[i]);
...
@@ -355,16 +355,19 @@ static int hclge_get_link_info(struct hclge_vport *vport,
 {
 	struct hclge_dev *hdev = vport->back;
 	u16 link_status;
-	u8 msg_data[8];
+	u8 msg_data[10];
+	u16 media_type;
 	u8 dest_vfid;
 	u16 duplex;

 	/* mac.link can only be 0 or 1 */
 	link_status = (u16)hdev->hw.mac.link;
 	duplex = hdev->hw.mac.duplex;
+	media_type = hdev->hw.mac.media_type;
 	memcpy(&msg_data[0], &link_status, sizeof(u16));
 	memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
 	memcpy(&msg_data[6], &duplex, sizeof(u16));
+	memcpy(&msg_data[8], &media_type, sizeof(u16));
 	dest_vfid = mbx_req->mbx_src_vfid;

 	/* send this requested info to VF */
@@ -372,6 +375,29 @@ static int hclge_get_link_info(struct hclge_vport *vport,
 			   HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
 }

+static void hclge_get_link_mode(struct hclge_vport *vport,
+				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+#define HCLGE_SUPPORTED   1
+	struct hclge_dev *hdev = vport->back;
+	unsigned long advertising;
+	unsigned long supported;
+	unsigned long send_data;
+	u8 msg_data[10];
+	u8 dest_vfid;
+
+	advertising = hdev->hw.mac.advertising[0];
+	supported = hdev->hw.mac.supported[0];
+	dest_vfid = mbx_req->mbx_src_vfid;
+	msg_data[0] = mbx_req->msg[2];
+
+	send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;
+
+	memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
+	hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+			   HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
+}
+
 static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
 				     struct hclge_mbx_vf_to_pf_cmd *mbx_req)
 {
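The PF packs its reply by hand into a raw 10-byte array. A hypothetical struct view of that layout, for illustration only (the driver deliberately keeps it a plain `u8` array):

/* Hypothetical view of msg_data[] above, not a struct in the driver */
struct link_mode_reply {
	u8 type;	/* echoed query: HCLGE_SUPPORTED (1) or advertising (0) */
	u8 rsv;
	u64 mode;	/* link-mode bitmap; sizeof(unsigned long) on arm64 */
} __packed;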
@@ -556,6 +582,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 					"PF failed(%d) to get qid for VF\n",
 					ret);
 			break;
+		case HCLGE_MBX_GET_LINK_MODE:
+			hclge_get_link_mode(vport, req);
+			break;
 		default:
 			dev_err(&hdev->pdev->dev,
 				"un-supported mailbox message, code = %d\n",
...
@@ -381,6 +381,21 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
 	}
 }

+void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_ADVERTISING 0
+#define HCLGEVF_SUPPORTED   1
+	u8 send_msg;
+	u8 resp_msg;
+
+	send_msg = HCLGEVF_ADVERTISING;
+	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
+			     sizeof(u8), false, &resp_msg, sizeof(u8));
+	send_msg = HCLGEVF_SUPPORTED;
+	hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg,
+			     sizeof(u8), false, &resp_msg, sizeof(u8));
+}
+
 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
 {
 	struct hnae3_handle *nic = &hdev->nic;
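Both VF queries above are sent with the no-response flag, so the service task never blocks here; the PF answers later with asynchronous HCLGE_MBX_LINK_STAT_MODE messages that `hclgevf_mbx_async_handler()` (further down) stores. Roughly:

/* Rough message flow, illustrative only:
 *
 *   VF service task                      PF mailbox handler
 *   ---------------                      ------------------
 *   GET_LINK_MODE(ADVERTISING)   ---->   hclge_get_link_mode()
 *   GET_LINK_MODE(SUPPORTED)     ---->   hclge_get_link_mode()
 *                                <----   LINK_STAT_MODE(idx, bitmap)
 *   hclgevf_mbx_async_handler() stores the bitmap in hdev->hw.mac.*
 */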
@@ -1646,6 +1661,8 @@ static void hclgevf_service_task(struct work_struct *work)
 	 */
 	hclgevf_request_link_info(hdev);

+	hclgevf_update_link_mode(hdev);
+
 	hclgevf_deferred_task_schedule(hdev);

 	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
@@ -1726,8 +1743,6 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
 {
 	int ret;

-	hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;
-
 	/* get queue configuration from PF */
 	ret = hclgevf_get_queue_info(hdev);
 	if (ret)
@@ -1880,6 +1895,8 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)

 	hclgevf_request_link_info(hdev);

+	hclgevf_update_link_mode(hdev);
+
 	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

 	return 0;
@@ -2550,7 +2567,7 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
 	hdev->hw.mac.duplex = duplex;
 }

-static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
+static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -2586,6 +2603,16 @@ static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
 	return hdev->reset_count;
 }

+static void hclgevf_get_link_mode(struct hnae3_handle *handle,
+				  unsigned long *supported,
+				  unsigned long *advertising)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+	*supported = hdev->hw.mac.supported;
+	*advertising = hdev->hw.mac.advertising;
+}
+
 #define MAX_SEPARATE_NUM	4
 #define SEPARATOR_VALUE		0xFFFFFFFF
 #define REG_NUM_PER_LINE	4
@@ -2704,6 +2731,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
 	.set_mtu = hclgevf_set_mtu,
 	.get_global_queue_id = hclgevf_get_qid_global,
 	.set_timer_task = hclgevf_set_timer_task,
+	.get_link_mode = hclgevf_get_link_mode,
 };

 static struct hnae3_ae_algo ae_algovf = {
...
@@ -145,6 +145,8 @@ struct hclgevf_mac {
 	int link;
 	u8 duplex;
 	u32 speed;
+	u64 supported;
+	u64 advertising;
 };

 struct hclgevf_hw {
...
@@ -197,6 +197,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 			break;
 		case HCLGE_MBX_LINK_STAT_CHANGE:
 		case HCLGE_MBX_ASSERTING_RESET:
+		case HCLGE_MBX_LINK_STAT_MODE:
 			/* set this mbx event as pending. This is required as we
 			 * might loose interrupt event when mbx task is busy
 			 * handling. This shall be cleared when mbx task just
@@ -247,6 +248,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 	u8 duplex;
 	u32 speed;
 	u32 tail;
+	u8 idx;

 	/* we can safely clear it now as we are at start of the async message
 	 * processing
@@ -270,12 +272,22 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 			link_status = le16_to_cpu(msg_q[1]);
 			memcpy(&speed, &msg_q[2], sizeof(speed));
 			duplex = (u8)le16_to_cpu(msg_q[4]);
+			hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]);

 			/* update upper layer with new link link status */
 			hclgevf_update_link_status(hdev, link_status);
 			hclgevf_update_speed_duplex(hdev, speed, duplex);

 			break;
+		case HCLGE_MBX_LINK_STAT_MODE:
+			idx = (u8)le16_to_cpu(msg_q[1]);
+			if (idx)
+				memcpy(&hdev->hw.mac.supported, &msg_q[2],
+				       sizeof(unsigned long));
+			else
+				memcpy(&hdev->hw.mac.advertising, &msg_q[2],
+				       sizeof(unsigned long));
+			break;
 		case HCLGE_MBX_ASSERTING_RESET:
 			/* PF has asserted reset hence VF should go in pending
 			 * state and poll for the hardware reset status till it
...
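One observation, not a bug report: the VF stores the bitmaps in `u64` fields but copies only `sizeof(unsigned long)` bytes, so on a 32-bit little-endian kernel the upper half of `supported`/`advertising` would simply stay zero. A minimal userspace sketch of that copy:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t supported = 0;
	unsigned long wire = 0xffUL;	/* bitmap as carried in msg_q[2..] */

	/* Mirrors the handler: copies sizeof(unsigned long) bytes into a
	 * 64-bit field; that is 8 bytes on 64-bit kernels, 4 on 32-bit.
	 */
	memcpy(&supported, &wire, sizeof(unsigned long));
	printf("supported = 0x%llx\n", (unsigned long long)supported);
	return 0;
}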