Commit e92453b9 authored by David S. Miller

Merge branch 'hns3-next'

Guangbin Huang says:

====================
net: hns3: updates for -next

This series includes some updates for the HNS3 ethernet driver.

Change logs:
V1 -> V2:
 - Fix the series failing to apply to net-next.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e97e917b 29c17cb6
......
@@ -135,10 +135,19 @@ struct hclge_vf_to_pf_msg {
struct hclge_pf_to_vf_msg {
u16 code;
u16 vf_mbx_msg_code;
u16 vf_mbx_msg_subcode;
u16 resp_status;
u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
union {
/* used for mbx response */
struct {
u16 vf_mbx_msg_code;
u16 vf_mbx_msg_subcode;
u16 resp_status;
u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
};
/* used for general mbx */
struct {
u8 msg_data[HCLGE_MBX_MAX_MSG_SIZE];
};
};
};
struct hclge_mbx_vf_to_pf_cmd {
......
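The union gives the PF two views of one payload: a structured view for mailbox responses and a flat byte array for general messages. A minimal standalone sketch of the layout invariant this relies on; the macro values below (14-byte message area, 8-byte response data) are assumptions consistent with the struct, not values quoted from the patch:

	#include <assert.h>
	#include <stdint.h>

	#define HCLGE_MBX_MAX_MSG_SIZE		14	/* assumed value */
	#define HCLGE_MBX_MAX_RESP_DATA_SIZE	8	/* assumed value */

	struct pf_to_vf_payload {
		union {
			struct {	/* mailbox response view */
				uint16_t vf_mbx_msg_code;
				uint16_t vf_mbx_msg_subcode;
				uint16_t resp_status;
				uint8_t resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
			};
			/* general message view over the same bytes */
			uint8_t msg_data[HCLGE_MBX_MAX_MSG_SIZE];
		};
	};

	/* both views must occupy exactly the same storage */
	static_assert(sizeof(struct pf_to_vf_payload) == HCLGE_MBX_MAX_MSG_SIZE,
		      "flat view must cover the structured view");

The flat msg_data view is what lets hclge_send_mbx_msg() (in a later hunk) copy a whole message with a single memcpy() instead of aliasing &msg.vf_mbx_msg_code.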
......
@@ -96,6 +96,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
HNAE3_DEV_SUPPORT_CQ_B,
};
#define hnae3_dev_fd_supported(hdev) \
......
@@ -155,6 +156,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps)
#define hnae3_ae_dev_cq_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps)
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
......
......
@@ -149,6 +149,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
......
@@ -160,6 +161,7 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
{HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
};
static void
......
......
@@ -338,6 +338,7 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_PAUSE_B = 14,
HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
HCLGE_COMM_CAP_CQ_B = 18,
};
enum HCLGE_COMM_API_CAP_BITS {
......
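Each hclge_comm_caps_bit_map entry pairs a firmware (IMP) capability bit with the HNAE3 capability it maps to, so adding CQ support is one new entry per table plus the new enum values. A sketch of how such a map is typically walked when parsing the firmware's capability words; the field names imp_bit/local_bit follow the driver's struct, but the loop itself is illustrative rather than the driver's exact code:

	/* illustrative: mirror advertised firmware caps into driver caps */
	static void parse_capabilities(const struct hclge_comm_caps_bit_map *map,
				       int map_size,
				       const unsigned long *fw_caps,
				       unsigned long *dev_caps)
	{
		int i;

		for (i = 0; i < map_size; i++)
			if (test_bit(map[i].imp_bit, fw_caps))
				set_bit(map[i].local_bit, dev_caps);
	}

Once firmware advertises HCLGE_COMM_CAP_CQ_B (bit 18), PF and VF alike end up with HNAE3_DEV_SUPPORT_CQ_B set in ae_dev->caps, which is exactly what the new hnae3_ae_dev_cq_supported() macro tests.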
......
@@ -5159,10 +5159,7 @@ static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
priv->tqp_vector[i].rx_group.dim.mode = mode;
}
/* only device version above V3(include V3), GL can switch CQ/EQ
* period mode.
*/
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
if (hnae3_ae_dev_cq_supported(ae_dev)) {
u32 new_mode;
u64 reg;
......
......
@@ -1106,6 +1106,36 @@ static int hns3_check_ringparam(struct net_device *ndev,
return 0;
}
static bool
hns3_is_ringparam_changed(struct net_device *ndev,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param,
struct hns3_ring_param *old_ringparam,
struct hns3_ring_param *new_ringparam)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
u16 queue_num = h->kinfo.num_tqps;
new_ringparam->tx_desc_num = ALIGN(param->tx_pending,
HNS3_RING_BD_MULTIPLE);
new_ringparam->rx_desc_num = ALIGN(param->rx_pending,
HNS3_RING_BD_MULTIPLE);
old_ringparam->tx_desc_num = priv->ring[0].desc_num;
old_ringparam->rx_desc_num = priv->ring[queue_num].desc_num;
old_ringparam->rx_buf_len = priv->ring[queue_num].buf_size;
new_ringparam->rx_buf_len = kernel_param->rx_buf_len;
if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num &&
old_ringparam->rx_desc_num == new_ringparam->rx_desc_num &&
old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) {
netdev_info(ndev, "ringparam not changed\n");
return false;
}
return true;
}
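Factoring the comparison out this way keeps hns3_set_ringparam() focused on the apply/revert flow. Both requested depths are first rounded up with ALIGN(), since the hardware requires descriptor counts to be a multiple of HNS3_RING_BD_MULTIPLE (eight), so a request that rounds to the current depth is reported as unchanged. A small illustration of that rounding (ALIGN_UP mimics the kernel's ALIGN(); the example values are mine):

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	/* ALIGN_UP(1024, 8) == 1024	already aligned, passes through */
	/* ALIGN_UP(1021, 8) == 1024	rounded up to the next multiple of 8 */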
static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
......
@@ -1151,14 +1181,11 @@ static int hns3_set_ringparam(struct net_device *ndev,
struct kernel_ethtool_ringparam *kernel_param,
struct netlink_ext_ack *extack)
{
struct hns3_ring_param old_ringparam, new_ringparam;
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_ring *tmp_rings;
bool if_running = netif_running(ndev);
u32 old_tx_desc_num, new_tx_desc_num;
u32 old_rx_desc_num, new_rx_desc_num;
u16 queue_num = h->kinfo.num_tqps;
u32 old_rx_buf_len;
int ret, i;
ret = hns3_check_ringparam(ndev, param, kernel_param);
......
@@ -1169,43 +1196,36 @@ static int hns3_set_ringparam(struct net_device *ndev,
if (ret)
return ret;
/* Hardware requires that its descriptors must be multiple of eight */
new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
old_tx_desc_num = priv->ring[0].desc_num;
old_rx_desc_num = priv->ring[queue_num].desc_num;
old_rx_buf_len = priv->ring[queue_num].buf_size;
if (old_tx_desc_num == new_tx_desc_num &&
old_rx_desc_num == new_rx_desc_num &&
kernel_param->rx_buf_len == old_rx_buf_len)
if (!hns3_is_ringparam_changed(ndev, param, kernel_param,
&old_ringparam, &new_ringparam))
return 0;
tmp_rings = hns3_backup_ringparam(priv);
if (!tmp_rings) {
netdev_err(ndev,
"backup ring param failed by allocating memory fail\n");
netdev_err(ndev, "backup ring param failed by allocating memory fail\n");
return -ENOMEM;
}
netdev_info(ndev,
"Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %d to %d\n",
old_tx_desc_num, old_rx_desc_num,
new_tx_desc_num, new_rx_desc_num,
old_rx_buf_len, kernel_param->rx_buf_len);
"Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %u to %u\n",
old_ringparam.tx_desc_num, old_ringparam.rx_desc_num,
new_ringparam.tx_desc_num, new_ringparam.rx_desc_num,
old_ringparam.rx_buf_len, new_ringparam.rx_buf_len);
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len);
hns3_change_all_ring_bd_num(priv, new_ringparam.tx_desc_num,
new_ringparam.rx_desc_num);
hns3_change_rx_buf_len(ndev, new_ringparam.rx_buf_len);
ret = hns3_init_all_ring(priv);
if (ret) {
netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n",
ret);
hns3_change_rx_buf_len(ndev, old_rx_buf_len);
hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
old_rx_desc_num);
hns3_change_rx_buf_len(ndev, old_ringparam.rx_buf_len);
hns3_change_all_ring_bd_num(priv, old_ringparam.tx_desc_num,
old_ringparam.rx_desc_num);
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
memcpy(&priv->ring[i], &tmp_rings[i],
sizeof(struct hns3_enet_ring));
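Both the new helper and this revert path rely on the hns3 ring array layout: entries 0 .. num_tqps-1 are TX rings and entries num_tqps .. 2*num_tqps-1 are RX rings, which is why the old TX depth is read from ring[0], the old RX depth and buffer length from ring[queue_num], and the restore loop copies back 2 * num_tqps entries. Hypothetical accessors spelling out that convention (not helpers the driver defines):

	/* hypothetical; hns3 indexes the two halves directly instead */
	static struct hns3_enet_ring *tx_ring(struct hns3_nic_priv *priv, u16 i)
	{
		return &priv->ring[i];		/* TX half: 0 .. num_tqps - 1 */
	}

	static struct hns3_enet_ring *rx_ring(struct hns3_nic_priv *priv, u16 i)
	{
		u16 num_tqps = priv->ae_handle->kinfo.num_tqps;

		return &priv->ring[num_tqps + i];	/* RX half follows TX */
	}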
......
@@ -1415,11 +1435,33 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev,
return 0;
}
static int hns3_check_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
static int
hns3_check_cqe_coalesce_param(struct net_device *netdev,
struct kernel_ethtool_coalesce *kernel_coal)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) &&
!hnae3_ae_dev_cq_supported(ae_dev)) {
netdev_err(netdev, "coalesced cqe mode is not supported\n");
return -EOPNOTSUPP;
}
return 0;
}
static int
hns3_check_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd,
struct kernel_ethtool_coalesce *kernel_coal)
{
int ret;
ret = hns3_check_cqe_coalesce_param(netdev, kernel_coal);
if (ret)
return ret;
ret = hns3_check_gl_coalesce_para(netdev, cmd);
if (ret) {
netdev_err(netdev,
......
@@ -1494,7 +1536,7 @@ static int hns3_set_coalesce(struct net_device *netdev,
if (hns3_nic_resetting(netdev))
return -EBUSY;
ret = hns3_check_coalesce_para(netdev, cmd);
ret = hns3_check_coalesce_para(netdev, cmd, kernel_coal);
if (ret)
return ret;
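The use_cqe_mode_tx/use_cqe_mode_rx flags arrive through ethtool's extended coalesce interface, so the new check can reject CQE period mode up front on hardware that does not advertise the CQ capability. Assuming an ethtool build recent enough to expose these knobs, the request that exercises this path looks like:

	ethtool -C eth0 cqe-mode-rx on cqe-mode-tx on

On a device without HNAE3_DEV_SUPPORT_CQ_B this now fails with EOPNOTSUPP and a clear log line instead of relying on later code to ignore the mode.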
......
@@ -1855,23 +1897,27 @@ static int hns3_set_tunable(struct net_device *netdev,
case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
new_tx_spare_buf_size = *(u32 *)data;
netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
old_tx_spare_buf_size, new_tx_spare_buf_size);
ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size);
if (ret ||
(!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) {
int ret1;
netdev_warn(netdev,
"change tx spare buf size fail, revert to old value\n");
netdev_warn(netdev, "change tx spare buf size fail, revert to old value\n");
ret1 = hns3_set_tx_spare_buf_size(netdev,
old_tx_spare_buf_size);
if (ret1) {
netdev_err(netdev,
"revert to old tx spare buf size fail\n");
netdev_err(netdev, "revert to old tx spare buf size fail\n");
return ret1;
}
return ret;
}
netdev_info(netdev, "the actvie tx spare buf size is %u, due to page order\n",
priv->ring->tx_spare->len);
break;
default:
ret = -EOPNOTSUPP;
......
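The added netdev_info() exists because the requested and effective sizes can legitimately differ: the tx spare buffer is backed by a page allocation, so the usable length is rounded by page order (hence "due to page order" in the message). A sketch of that rounding, assuming 4 KiB pages and a single power-of-two, get_order()-style allocation, which is an assumption about the allocator rather than code from this patch:

	/* round a requested size up to a whole power-of-two page allocation */
	static unsigned int active_spare_len(unsigned int requested)
	{
		unsigned int len = 4096;	/* assume 4 KiB PAGE_SIZE */

		while (len < requested)
			len <<= 1;		/* next page order */

		return len;			/* e.g. 20000 -> 32768 */
	}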
......
@@ -28,4 +28,10 @@ struct hns3_ethtool_link_ext_state_mapping {
u8 link_ext_substate;
};
struct hns3_ring_param {
u32 tx_desc_num;
u32 rx_desc_num;
u32 rx_buf_len;
};
#endif
......
@@ -1977,7 +1977,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
* @num: number of extended command structures
*
* This function handles all the PF RAS errors in the
* hw register/s using command.
* hw registers using command.
*/
static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
struct hclge_desc *desc,
......
......
@@ -10449,6 +10449,9 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* PF's mps must be greater than VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++)
if (max_frm_size < hdev->vport[i].mps) {
dev_err(&hdev->pdev->dev,
"failed to set pf mtu for less than vport %d, mps = %u.\n",
i, hdev->vport[i].mps);
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
}
......
......
@@ -102,7 +102,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
resp_pf_to_vf->msg_len = msg_len;
resp_pf_to_vf->msg.code = mbx_opcode;
memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);
memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len);
trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
......
......
@@ -2963,7 +2963,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
/* ensure vf tbl list as empty before init*/
/* ensure vf tbl list as empty before init */
ret = hclgevf_clear_vport_list(hdev);
if (ret) {
dev_err(&pdev->dev,
......
@@ -3315,7 +3315,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
for (i = 0; i < reg_um; i++)
*reg++ = hclgevf_read_dev(&hdev->hw,
ring_reg_addr_list[i] +
0x200 * j);
HCLGEVF_TQP_REG_SIZE * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
......
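Replacing the bare 0x200 with HCLGEVF_TQP_REG_SIZE makes the stride explicit: each TQP exposes its ring registers in its own 0x200-byte block, and the loop reads the same offset from queue j's block. The addressing it implies, as a sketch (kernel types assumed):

	/* per-queue register address: common offset plus one
	 * HCLGEVF_TQP_REG_SIZE (0x200-byte) block per TQP index
	 */
	static u32 tqp_reg_addr(u32 reg_offset, u32 qid)
	{
		return reg_offset + 0x200 * qid;
	}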
......
@@ -17,7 +17,7 @@ static int hclgevf_resp_to_errno(u16 resp_code)
static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
{
/* this function should be called with mbx_resp.mbx_mutex held
* to prtect the received_response from race condition
* to protect the received_response from race condition
*/
hdev->mbx_resp.received_resp = false;
hdev->mbx_resp.origin_mbx_msg = 0;
......
@@ -32,8 +32,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox
* message to PF.
* @hdev: pointer to struct hclgevf_dev
* @resp_msg: pointer to store the original message type and response status
* @len: the resp_msg data array length.
* @code0: the message opcode VF send to PF.
* @code1: the message sub-opcode VF send to PF.
* @resp_data: pointer to store response data from PF to VF.
* @resp_len: the length of resp_data from PF to VF.
*/
static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
u8 *resp_data, u16 resp_len)
......
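The rewritten kernel-doc reflects how the response path actually works: the VF passes the opcode/sub-opcode pair it sent so the mailbox code can match the PF's reply against the request it belongs to. A sketch of that matching, under the assumption (from the surrounding driver code, not this patch) that the origin is tracked as the two codes packed into one word:

	/* illustrative: accept a reply only if it answers the request we sent */
	static bool resp_matches(u32 origin_mbx_msg, u16 code0, u16 code1)
	{
		return origin_mbx_msg == ((u32)code0 << 16 | code1);
	}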