Commit e92453b9 authored by David S. Miller's avatar David S. Miller

Merge branch 'hns3-next'

Guangbin Huang says:

====================
net: hns3: updates for -next

This series includes some updates for the HNS3 ethernet driver.

Change logs:
V1 -> V2:
 - Fix failed to apply to net-next problem.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents e97e917b 29c17cb6
...@@ -135,10 +135,19 @@ struct hclge_vf_to_pf_msg { ...@@ -135,10 +135,19 @@ struct hclge_vf_to_pf_msg {
struct hclge_pf_to_vf_msg { struct hclge_pf_to_vf_msg {
u16 code; u16 code;
u16 vf_mbx_msg_code; union {
u16 vf_mbx_msg_subcode; /* used for mbx response */
u16 resp_status; struct {
u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE]; u16 vf_mbx_msg_code;
u16 vf_mbx_msg_subcode;
u16 resp_status;
u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
};
/* used for general mbx */
struct {
u8 msg_data[HCLGE_MBX_MAX_MSG_SIZE];
};
};
}; };
struct hclge_mbx_vf_to_pf_cmd { struct hclge_mbx_vf_to_pf_cmd {
......
...@@ -96,6 +96,7 @@ enum HNAE3_DEV_CAP_BITS { ...@@ -96,6 +96,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
HNAE3_DEV_SUPPORT_CQ_B,
}; };
#define hnae3_dev_fd_supported(hdev) \ #define hnae3_dev_fd_supported(hdev) \
...@@ -155,6 +156,9 @@ enum HNAE3_DEV_CAP_BITS { ...@@ -155,6 +156,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \ #define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps) test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps)
#define hnae3_ae_dev_cq_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps)
enum HNAE3_PF_CAP_BITS { enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
}; };
......
...@@ -149,6 +149,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = { ...@@ -149,6 +149,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B}, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B}, {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
}; };
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
...@@ -160,6 +161,7 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = { ...@@ -160,6 +161,7 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B}, {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B}, {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
}; };
static void static void
......
...@@ -338,6 +338,7 @@ enum HCLGE_COMM_CAP_BITS { ...@@ -338,6 +338,7 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_PAUSE_B = 14, HCLGE_COMM_CAP_PAUSE_B = 14,
HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15, HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17, HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
HCLGE_COMM_CAP_CQ_B = 18,
}; };
enum HCLGE_COMM_API_CAP_BITS { enum HCLGE_COMM_API_CAP_BITS {
......
...@@ -5159,10 +5159,7 @@ static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv, ...@@ -5159,10 +5159,7 @@ static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
priv->tqp_vector[i].rx_group.dim.mode = mode; priv->tqp_vector[i].rx_group.dim.mode = mode;
} }
/* only device version above V3(include V3), GL can switch CQ/EQ if (hnae3_ae_dev_cq_supported(ae_dev)) {
* period mode.
*/
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
u32 new_mode; u32 new_mode;
u64 reg; u64 reg;
......
...@@ -1106,6 +1106,36 @@ static int hns3_check_ringparam(struct net_device *ndev, ...@@ -1106,6 +1106,36 @@ static int hns3_check_ringparam(struct net_device *ndev,
return 0; return 0;
} }
static bool
hns3_is_ringparam_changed(struct net_device *ndev,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param,
struct hns3_ring_param *old_ringparam,
struct hns3_ring_param *new_ringparam)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
u16 queue_num = h->kinfo.num_tqps;
new_ringparam->tx_desc_num = ALIGN(param->tx_pending,
HNS3_RING_BD_MULTIPLE);
new_ringparam->rx_desc_num = ALIGN(param->rx_pending,
HNS3_RING_BD_MULTIPLE);
old_ringparam->tx_desc_num = priv->ring[0].desc_num;
old_ringparam->rx_desc_num = priv->ring[queue_num].desc_num;
old_ringparam->rx_buf_len = priv->ring[queue_num].buf_size;
new_ringparam->rx_buf_len = kernel_param->rx_buf_len;
if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num &&
old_ringparam->rx_desc_num == new_ringparam->rx_desc_num &&
old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) {
netdev_info(ndev, "ringparam not changed\n");
return false;
}
return true;
}
static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len) static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
{ {
struct hns3_nic_priv *priv = netdev_priv(ndev); struct hns3_nic_priv *priv = netdev_priv(ndev);
...@@ -1151,14 +1181,11 @@ static int hns3_set_ringparam(struct net_device *ndev, ...@@ -1151,14 +1181,11 @@ static int hns3_set_ringparam(struct net_device *ndev,
struct kernel_ethtool_ringparam *kernel_param, struct kernel_ethtool_ringparam *kernel_param,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
struct hns3_ring_param old_ringparam, new_ringparam;
struct hns3_nic_priv *priv = netdev_priv(ndev); struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle; struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_ring *tmp_rings; struct hns3_enet_ring *tmp_rings;
bool if_running = netif_running(ndev); bool if_running = netif_running(ndev);
u32 old_tx_desc_num, new_tx_desc_num;
u32 old_rx_desc_num, new_rx_desc_num;
u16 queue_num = h->kinfo.num_tqps;
u32 old_rx_buf_len;
int ret, i; int ret, i;
ret = hns3_check_ringparam(ndev, param, kernel_param); ret = hns3_check_ringparam(ndev, param, kernel_param);
...@@ -1169,43 +1196,36 @@ static int hns3_set_ringparam(struct net_device *ndev, ...@@ -1169,43 +1196,36 @@ static int hns3_set_ringparam(struct net_device *ndev,
if (ret) if (ret)
return ret; return ret;
/* Hardware requires that its descriptors must be multiple of eight */ if (!hns3_is_ringparam_changed(ndev, param, kernel_param,
new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE); &old_ringparam, &new_ringparam))
new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
old_tx_desc_num = priv->ring[0].desc_num;
old_rx_desc_num = priv->ring[queue_num].desc_num;
old_rx_buf_len = priv->ring[queue_num].buf_size;
if (old_tx_desc_num == new_tx_desc_num &&
old_rx_desc_num == new_rx_desc_num &&
kernel_param->rx_buf_len == old_rx_buf_len)
return 0; return 0;
tmp_rings = hns3_backup_ringparam(priv); tmp_rings = hns3_backup_ringparam(priv);
if (!tmp_rings) { if (!tmp_rings) {
netdev_err(ndev, netdev_err(ndev, "backup ring param failed by allocating memory fail\n");
"backup ring param failed by allocating memory fail\n");
return -ENOMEM; return -ENOMEM;
} }
netdev_info(ndev, netdev_info(ndev,
"Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %d to %d\n", "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %u to %u\n",
old_tx_desc_num, old_rx_desc_num, old_ringparam.tx_desc_num, old_ringparam.rx_desc_num,
new_tx_desc_num, new_rx_desc_num, new_ringparam.tx_desc_num, new_ringparam.rx_desc_num,
old_rx_buf_len, kernel_param->rx_buf_len); old_ringparam.rx_buf_len, new_ringparam.rx_buf_len);
if (if_running) if (if_running)
ndev->netdev_ops->ndo_stop(ndev); ndev->netdev_ops->ndo_stop(ndev);
hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num); hns3_change_all_ring_bd_num(priv, new_ringparam.tx_desc_num,
hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len); new_ringparam.rx_desc_num);
hns3_change_rx_buf_len(ndev, new_ringparam.rx_buf_len);
ret = hns3_init_all_ring(priv); ret = hns3_init_all_ring(priv);
if (ret) { if (ret) {
netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n", netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n",
ret); ret);
hns3_change_rx_buf_len(ndev, old_rx_buf_len); hns3_change_rx_buf_len(ndev, old_ringparam.rx_buf_len);
hns3_change_all_ring_bd_num(priv, old_tx_desc_num, hns3_change_all_ring_bd_num(priv, old_ringparam.tx_desc_num,
old_rx_desc_num); old_ringparam.rx_desc_num);
for (i = 0; i < h->kinfo.num_tqps * 2; i++) for (i = 0; i < h->kinfo.num_tqps * 2; i++)
memcpy(&priv->ring[i], &tmp_rings[i], memcpy(&priv->ring[i], &tmp_rings[i],
sizeof(struct hns3_enet_ring)); sizeof(struct hns3_enet_ring));
...@@ -1415,11 +1435,33 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev, ...@@ -1415,11 +1435,33 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev,
return 0; return 0;
} }
static int hns3_check_coalesce_para(struct net_device *netdev, static int
struct ethtool_coalesce *cmd) hns3_check_cqe_coalesce_param(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) &&
!hnae3_ae_dev_cq_supported(ae_dev)) {
netdev_err(netdev, "coalesced cqe mode is not supported\n");
return -EOPNOTSUPP;
}
return 0;
}
static int
hns3_check_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd,
struct kernel_ethtool_coalesce *kernel_coal)
{ {
int ret; int ret;
ret = hns3_check_cqe_coalesce_param(netdev, kernel_coal);
if (ret)
return ret;
ret = hns3_check_gl_coalesce_para(netdev, cmd); ret = hns3_check_gl_coalesce_para(netdev, cmd);
if (ret) { if (ret) {
netdev_err(netdev, netdev_err(netdev,
...@@ -1494,7 +1536,7 @@ static int hns3_set_coalesce(struct net_device *netdev, ...@@ -1494,7 +1536,7 @@ static int hns3_set_coalesce(struct net_device *netdev,
if (hns3_nic_resetting(netdev)) if (hns3_nic_resetting(netdev))
return -EBUSY; return -EBUSY;
ret = hns3_check_coalesce_para(netdev, cmd); ret = hns3_check_coalesce_para(netdev, cmd, kernel_coal);
if (ret) if (ret)
return ret; return ret;
...@@ -1855,23 +1897,27 @@ static int hns3_set_tunable(struct net_device *netdev, ...@@ -1855,23 +1897,27 @@ static int hns3_set_tunable(struct net_device *netdev,
case ETHTOOL_TX_COPYBREAK_BUF_SIZE: case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size; old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
new_tx_spare_buf_size = *(u32 *)data; new_tx_spare_buf_size = *(u32 *)data;
netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
old_tx_spare_buf_size, new_tx_spare_buf_size);
ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size); ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size);
if (ret || if (ret ||
(!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) { (!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) {
int ret1; int ret1;
netdev_warn(netdev, netdev_warn(netdev, "change tx spare buf size fail, revert to old value\n");
"change tx spare buf size fail, revert to old value\n");
ret1 = hns3_set_tx_spare_buf_size(netdev, ret1 = hns3_set_tx_spare_buf_size(netdev,
old_tx_spare_buf_size); old_tx_spare_buf_size);
if (ret1) { if (ret1) {
netdev_err(netdev, netdev_err(netdev, "revert to old tx spare buf size fail\n");
"revert to old tx spare buf size fail\n");
return ret1; return ret1;
} }
return ret; return ret;
} }
netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n",
priv->ring->tx_spare->len);
break; break;
default: default:
ret = -EOPNOTSUPP; ret = -EOPNOTSUPP;
......
...@@ -28,4 +28,10 @@ struct hns3_ethtool_link_ext_state_mapping { ...@@ -28,4 +28,10 @@ struct hns3_ethtool_link_ext_state_mapping {
u8 link_ext_substate; u8 link_ext_substate;
}; };
struct hns3_ring_param {
u32 tx_desc_num;
u32 rx_desc_num;
u32 rx_buf_len;
};
#endif #endif
...@@ -1977,7 +1977,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, ...@@ -1977,7 +1977,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
* @num: number of extended command structures * @num: number of extended command structures
* *
* This function handles all the PF RAS errors in the * This function handles all the PF RAS errors in the
* hw register/s using command. * hw registers using command.
*/ */
static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
struct hclge_desc *desc, struct hclge_desc *desc,
......
...@@ -10449,6 +10449,9 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) ...@@ -10449,6 +10449,9 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* PF's mps must be greater than VF's mps */ /* PF's mps must be greater than VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++) for (i = 1; i < hdev->num_alloc_vport; i++)
if (max_frm_size < hdev->vport[i].mps) { if (max_frm_size < hdev->vport[i].mps) {
dev_err(&hdev->pdev->dev,
"failed to set pf mtu for less than vport %d, mps = %u.\n",
i, hdev->vport[i].mps);
mutex_unlock(&hdev->vport_lock); mutex_unlock(&hdev->vport_lock);
return -EINVAL; return -EINVAL;
} }
......
...@@ -102,7 +102,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, ...@@ -102,7 +102,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
resp_pf_to_vf->msg_len = msg_len; resp_pf_to_vf->msg_len = msg_len;
resp_pf_to_vf->msg.code = mbx_opcode; resp_pf_to_vf->msg.code = mbx_opcode;
memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len); memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len);
trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf); trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
......
...@@ -2963,7 +2963,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) ...@@ -2963,7 +2963,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config; goto err_config;
} }
/* ensure vf tbl list as empty before init*/ /* ensure vf tbl list as empty before init */
ret = hclgevf_clear_vport_list(hdev); ret = hclgevf_clear_vport_list(hdev);
if (ret) { if (ret) {
dev_err(&pdev->dev, dev_err(&pdev->dev,
...@@ -3315,7 +3315,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, ...@@ -3315,7 +3315,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
for (i = 0; i < reg_um; i++) for (i = 0; i < reg_um; i++)
*reg++ = hclgevf_read_dev(&hdev->hw, *reg++ = hclgevf_read_dev(&hdev->hw,
ring_reg_addr_list[i] + ring_reg_addr_list[i] +
0x200 * j); HCLGEVF_TQP_REG_SIZE * j);
for (i = 0; i < separator_num; i++) for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE; *reg++ = SEPARATOR_VALUE;
} }
......
...@@ -17,7 +17,7 @@ static int hclgevf_resp_to_errno(u16 resp_code) ...@@ -17,7 +17,7 @@ static int hclgevf_resp_to_errno(u16 resp_code)
static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
{ {
/* this function should be called with mbx_resp.mbx_mutex held /* this function should be called with mbx_resp.mbx_mutex held
* to prtect the received_response from race condition * to protect the received_response from race condition
*/ */
hdev->mbx_resp.received_resp = false; hdev->mbx_resp.received_resp = false;
hdev->mbx_resp.origin_mbx_msg = 0; hdev->mbx_resp.origin_mbx_msg = 0;
...@@ -32,8 +32,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) ...@@ -32,8 +32,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox /* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox
* message to PF. * message to PF.
* @hdev: pointer to struct hclgevf_dev * @hdev: pointer to struct hclgevf_dev
* @resp_msg: pointer to store the original message type and response status * @code0: the message opcode VF send to PF.
* @len: the resp_msg data array length. * @code1: the message sub-opcode VF send to PF.
* @resp_data: pointer to store response data from PF to VF.
* @resp_len: the length of resp_data from PF to VF.
*/ */
static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
u8 *resp_data, u16 resp_len) u8 *resp_data, u16 resp_len)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment