Commit 10086b34 authored by David S. Miller

Merge branch 'hns3-next'

Huazhong Tan says:

====================
net: hns3: add some cleanups & bugfix

This patch-set includes cleanups and a bugfix for the HNS3 ethernet
controller driver.

[patch 01/06 - 03/06] add some cleanups.

[patch 04/06] changes the print level of RAS error messages.

[patch 05/06] fixes a bug related to MAC TNL.

[patch 06/06] adds phy_attached_info().
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d83d508b 1bef61fc
@@ -104,7 +104,6 @@ int hnae3_register_client(struct hnae3_client *client)
{
struct hnae3_client *client_tmp;
struct hnae3_ae_dev *ae_dev;
int ret = 0;
if (!client)
return -ENODEV;
@@ -123,7 +122,7 @@ int hnae3_register_client(struct hnae3_client *client)
/* if the client could not be initialized on current port, for
* any error reasons, move on to next available port
*/
ret = hnae3_init_client_instance(client, ae_dev);
int ret = hnae3_init_client_instance(client, ae_dev);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed for port, ret = %d\n",
@@ -164,7 +163,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
const struct pci_device_id *id;
struct hnae3_ae_dev *ae_dev;
struct hnae3_client *client;
int ret = 0;
int ret;
if (!ae_algo)
return;
@@ -258,7 +257,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
int ret = 0;
int ret;
if (!ae_dev)
return -ENODEV;
......
@@ -87,8 +87,8 @@ struct hnae3_queue {
struct hnae3_handle *handle;
int tqp_index; /* index in a handle */
u32 buf_size; /* size for hnae_desc->addr, preset by AE */
u16 tx_desc_num;/* total number of tx desc */
u16 rx_desc_num;/* total number of rx desc */
u16 tx_desc_num; /* total number of tx desc */
u16 rx_desc_num; /* total number of rx desc */
};
struct hns3_mac_stats {
@@ -96,7 +96,7 @@ struct hns3_mac_stats {
u64 rx_pause_cnt;
};
/*hnae3 loop mode*/
/* hnae3 loop mode */
enum hnae3_loop {
HNAE3_LOOP_APP,
HNAE3_LOOP_SERIAL_SERDES,
@@ -621,7 +621,7 @@ struct hnae3_handle {
struct pci_dev *pdev;
void *priv;
struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */
u64 flags; /* Indicate the capabilities for this handle*/
u64 flags; /* Indicate the capabilities for this handle */
union {
struct net_device *netdev; /* first member */
......
@@ -229,9 +229,9 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
/* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter)
*
* Default: enable interrupt coalescing self-adaptive and GL
*/
/* Default: enable interrupt coalescing self-adaptive and GL */
tqp_vector->tx_group.coal.gl_adapt_enable = 1;
tqp_vector->rx_group.coal.gl_adapt_enable = 1;
@@ -4207,8 +4207,8 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
static void hns3_store_coal(struct hns3_nic_priv *priv)
{
/* ethtool only support setting and querying one coal
* configuation for now, so save the vector 0' coal
* configuation here in order to restore it.
* configuration for now, so save the vector 0' coal
* configuration here in order to restore it.
*/
memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
sizeof(struct hns3_enet_coalesce));
......
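The hns3_store_coal() comment above notes that ethtool currently exposes only a single coalesce configuration, so the driver snapshots vector 0's settings in order to restore them later. Below is a minimal sketch of that store/restore pairing, assuming the names visible in the hunk (struct hns3_enet_coalesce, tqp_vector[], tx_coal/rx_coal); the restore loop and the vector_num field are illustrative assumptions, not the exact upstream implementation.

/* Minimal sketch of the store/restore pairing described above; the
 * restore loop and the vector_num field are assumptions based on the
 * names visible in this hunk, not the exact upstream code.
 */
static void hns3_store_coal_sketch(struct hns3_nic_priv *priv)
{
	/* ethtool handles one coalesce config, so vector 0 is treated
	 * as the canonical copy.
	 */
	memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
	memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
}

static void hns3_restore_coal_sketch(struct hns3_nic_priv *priv)
{
	int i;

	/* Re-apply the saved settings to every vector, e.g. after a reset. */
	for (i = 0; i < priv->vector_num; i++) {
		memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
		       sizeof(struct hns3_enet_coalesce));
		memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
		       sizeof(struct hns3_enet_coalesce));
	}
}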
@@ -302,7 +302,7 @@ struct hns3_desc_cb {
dma_addr_t dma; /* dma address of this desc */
void *buf; /* cpu addr for a desc */
/* priv data for the desc, e.g. skb when use with ip stack*/
/* priv data for the desc, e.g. skb when use with ip stack */
void *priv;
u32 page_offset;
u32 length; /* length of the buffer */
@@ -325,11 +325,11 @@ enum hns3_pkt_l3type {
HNS3_L3_TYPE_MAC_PAUSE,
HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/
/* reserved for 0xA~0xB*/
/* reserved for 0xA~0xB */
HNS3_L3_TYPE_CNM = 0xc,
/* reserved for 0xD~0xE*/
/* reserved for 0xD~0xE */
HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */
};
@@ -354,7 +354,7 @@ enum hns3_pkt_ol3type {
HNS3_OL3_TYPE_IPV4_OPT = 4,
HNS3_OL3_TYPE_IPV6_EXT,
/* reserved for 0x6~0xE*/
/* reserved for 0x6~0xE */
HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */
};
......
@@ -635,7 +635,7 @@ static void hns3_get_ksettings(struct hnae3_handle *h,
&cmd->base.speed,
&cmd->base.duplex);
/* 2.get link mode*/
/* 2.get link mode */
if (ops->get_link_mode)
ops->get_link_mode(h,
cmd->link_modes.supported,
@@ -704,7 +704,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
return 0;
}
static int hns3_check_ksettings_param(struct net_device *netdev,
static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
@@ -749,7 +749,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
int ret = 0;
int ret;
/* Chip don't support this mode. */
if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
......
@@ -897,14 +897,17 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
sel_x ? "x" : "y", loc);
/* tcam_data0 ~ tcam_data1 */
req = (u32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
/* tcam_data2 ~ tcam_data7 */
req = (u32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
/* tcam_data8 ~ tcam_data12 */
req = (u32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
......
@@ -637,7 +637,7 @@ static void hclge_log_error(struct device *dev, char *reg,
{
while (err->msg) {
if (err->int_msk & err_sts) {
dev_warn(dev, "%s %s found [error status=0x%x]\n",
dev_err(dev, "%s %s found [error status=0x%x]\n",
reg, err->msg, err_sts);
if (err->reset_level &&
err->reset_level != HNAE3_NONE_RESET)
@@ -1163,7 +1163,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
if (status) {
dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
dev_err(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
status);
set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
@@ -1200,8 +1200,8 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
desc_data = (__le32 *)&desc[5];
status = le32_to_cpu(*(desc_data + 1));
if (status) {
dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
"rpu_rx_pkt_ecc_mbit_err");
dev_err(dev,
"PPU_MPF_ABNORMAL_INT_ST1 rpu_rx_pkt_ecc_mbit_err found\n");
set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
@@ -1379,15 +1379,15 @@ static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
return ret;
}
dev_info(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
dev_err(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
dev_info(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
dev_err(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]),
le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]),
le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5]));
dev_info(dev, "AXI3: %08X %08X %08X %08X\n",
dev_err(dev, "AXI3: %08X %08X %08X %08X\n",
le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]),
le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
@@ -1408,11 +1408,11 @@ static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
return ret;
}
dev_info(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
dev_err(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
dev_info(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
dev_err(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
return 0;
@@ -1442,7 +1442,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
le32_to_cpu(desc[0].data[0]);
while (err->msg) {
if (err->int_msk == err_sts) {
dev_warn(dev, "%s [error status=0x%x] found\n",
dev_err(dev, "%s [error status=0x%x] found\n",
err->msg,
le32_to_cpu(desc[0].data[0]));
break;
@@ -1452,12 +1452,12 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
}
if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
dev_warn(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
dev_err(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
le32_to_cpu(desc[0].data[1]));
}
if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
dev_warn(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
dev_err(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
le32_to_cpu(desc[0].data[2]));
}
@@ -1486,10 +1486,10 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) {
if (status & HCLGE_ROCEE_RERR_INT_MASK)
dev_warn(dev, "ROCEE RAS AXI rresp error\n");
dev_err(dev, "ROCEE RAS AXI rresp error\n");
if (status & HCLGE_ROCEE_BERR_INT_MASK)
dev_warn(dev, "ROCEE RAS AXI bresp error\n");
dev_err(dev, "ROCEE RAS AXI bresp error\n");
reset_type = HNAE3_FUNC_RESET;
@@ -1499,7 +1499,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
}
if (status & HCLGE_ROCEE_ECC_INT_MASK) {
dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
dev_err(dev, "ROCEE RAS 2bit ECC error\n");
reset_type = HNAE3_GLOBAL_RESET;
ret = hclge_log_rocee_ecc_error(hdev);
@@ -1557,8 +1557,8 @@ int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
{
enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
struct hclge_dev *hdev = ae_dev->priv;
enum hnae3_reset_type reset_type;
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
hdev->pdev->revision < 0x21)
@@ -1640,7 +1640,7 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
/* Handling Non-fatal HNS RAS errors */
if (status & HCLGE_RAS_REG_NFE_MASK) {
dev_warn(dev,
dev_err(dev,
"HNS Non-Fatal RAS error(status=0x%x) identified\n",
status);
hclge_handle_all_ras_errors(hdev);
@@ -1649,7 +1649,7 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
/* Handling Non-fatal Rocee RAS errors */
if (hdev->pdev->revision >= 0x21 &&
status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
dev_warn(dev, "ROCEE Non-Fatal RAS error identified\n");
dev_err(dev, "ROCEE Non-Fatal RAS error identified\n");
hclge_handle_rocee_ras_error(ae_dev);
}
@@ -1728,7 +1728,7 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
return;
}
dev_warn(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%d), queue_id(%d)\n",
dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n",
vf_id, q_id);
if (vf_id) {
@@ -1746,7 +1746,7 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
if (ret)
dev_warn(dev, "inform reset to vf(%d) failed %d!\n",
dev_err(dev, "inform reset to vf(%u) failed %d!\n",
hdev->vport->vport_id, ret);
} else {
set_bit(HNAE3_FUNC_RESET, reset_requests);
@@ -1793,7 +1793,7 @@ static int hclge_handle_mpf_msix_error(struct hclge_dev *hdev,
status = le32_to_cpu(*(desc_data + 2)) &
HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
if (status)
dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]",
dev_err(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]",
status);
/* clear all main PF MSIx errors */
@@ -1988,7 +1988,7 @@ void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
/* Handle Non-fatal HNS RAS errors */
if (status & HCLGE_RAS_REG_NFE_MASK) {
dev_warn(dev, "HNS hw error(RAS) identified during init\n");
dev_err(dev, "HNS hw error(RAS) identified during init\n");
hclge_handle_all_ras_errors(hdev);
}
......
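Patch 04/06 raises these RAS messages from dev_warn() to dev_err(). The table-driven logging used by hclge_log_error(), walking an array of mask/message entries and reporting every bit set in the hardware status word, can be sketched as follows; the struct layout and the final set_bit() on the caller's reset-request bitmap are inferred from the fields used in the hunk above rather than quoted from the driver headers.

/* Sketch of the table-driven RAS logging seen in hclge_log_error();
 * the struct layout and the set_bit() call are inferred from the
 * int_msk/msg/reset_level uses shown in this diff.
 */
struct hclge_hw_error_sketch {
	u32 int_msk;                       /* status bit(s) for this error */
	const char *msg;                   /* human-readable description   */
	enum hnae3_reset_type reset_level; /* reset needed to recover      */
};

static void hclge_log_error_sketch(struct device *dev, const char *reg,
				   const struct hclge_hw_error_sketch *err,
				   u32 err_sts, unsigned long *reset_req)
{
	/* Walk the table until the NULL-message sentinel entry. */
	while (err->msg) {
		if (err->int_msk & err_sts) {
			/* error level, as changed by patch 04/06 */
			dev_err(dev, "%s %s found [error status=0x%x]\n",
				reg, err->msg, err_sts);
			if (err->reset_level &&
			    err->reset_level != HNAE3_NONE_RESET)
				set_bit(err->reset_level, reset_req);
		}
		err++;
	}
}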
@@ -1128,6 +1128,7 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
struct hclge_cfg_param_cmd *req;
@@ -2810,9 +2811,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
* defer the processing of the mailbox events. Since, we would have not
* cleared RX CMDQ event this time we would receive again another
* interrupt from H/W just for the mailbox.
*
* check for vector0 reset event sources
*/
/* check for vector0 reset event sources */
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
@@ -4364,8 +4365,8 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
struct hnae3_ring_chain_node *node;
struct hclge_desc desc;
struct hclge_ctrl_vector_chain_cmd *req
= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
struct hclge_ctrl_vector_chain_cmd *req =
(struct hclge_ctrl_vector_chain_cmd *)desc.data;
enum hclge_cmd_status status;
enum hclge_opcode_type op;
u16 tqp_type_and_id;
@@ -6379,6 +6380,8 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
for (i = 0; i < handle->kinfo.num_tqps; i++)
hclge_reset_tqp(handle, i);
hclge_config_mac_tnl_int(hdev, false);
/* Mac disable */
hclge_cfg_mac_mode(hdev, false);
@@ -8000,7 +8003,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
return -EBUSY;
}
/* When port base vlan enabled, we use port base vlan as the vlan
/* when port base vlan enabled, we use port base vlan as the vlan
* filter entry. In this case, we don't update vlan filter table
* when user add new vlan or remove exist vlan, just update the vport
* vlan list. The vlan id in vlan list will be writen in vlan filter
@@ -8019,7 +8022,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
hclge_add_vport_vlan_table(vport, vlan_id,
writen_to_tbl);
} else if (is_kill) {
/* When remove hw vlan filter failed, record the vlan id,
/* when remove hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistence
* with stack
*/
@@ -8656,7 +8659,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
}
}
return ret;
return 0;
clear_nic:
hdev->nic_client = NULL;
......
@@ -1000,7 +1000,6 @@ int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
......
@@ -479,7 +479,7 @@ static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
hclge_reset_vf_queue(vport, queue_id);
/* send response msg to VF after queue reset complete*/
/* send response msg to VF after queue reset complete */
hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
}
......
@@ -231,6 +231,8 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
phydev->advertising);
phy_attached_info(phydev);
return 0;
}
......
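Patch 06/06 adds the phy_attached_info() call shown in the hunk above; it prints the attached PHY's driver, id and IRQ once the MAC is bound to it. A minimal, hypothetical sketch of where such a call sits in a phylib connect path (the helper name and the PHY_INTERFACE_MODE_SGMII choice are illustrative, not taken from the driver):

#include <linux/phy.h>

/* Hypothetical, condensed connect helper; only the placement of
 * phy_attached_info() at the end mirrors the hunk above.
 */
static int example_mac_connect_phy(struct net_device *netdev,
				   struct phy_device *phydev,
				   void (*handler)(struct net_device *))
{
	int ret;

	ret = phy_connect_direct(netdev, phydev, handler,
				 PHY_INTERFACE_MODE_SGMII);
	if (ret)
		return ret;

	/* Trim advertised modes as the driver sees fit, then report
	 * what was attached (driver name, PHY id, IRQ).
	 */
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	phy_attached_info(phydev);

	return 0;
}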
@@ -404,8 +404,8 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
struct hclge_port_shapping_cmd *shap_cfg_cmd;
struct hclge_desc desc;
u32 shapping_para = 0;
u8 ir_u, ir_b, ir_s;
u32 shapping_para;
int ret;
ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
......
@@ -43,7 +43,7 @@ static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
int clean = 0;
int clean;
u32 head;
head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
......
@@ -1269,7 +1269,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
HCLGE_MBX_VLAN_FILTER, msg_data,
HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
/* When remove hw vlan filter failed, record the vlan id,
/* when remove hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistence
* with stack.
*/
@@ -1561,7 +1561,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
rtnl_lock();
/* now, re-initialize the nic client and ae device*/
/* now, re-initialize the nic client and ae device */
ret = hclgevf_reset_stack(hdev);
if (ret) {
dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
@@ -1784,9 +1784,8 @@ static void hclgevf_reset_service_task(struct work_struct *work)
* 1b and 2. cases but we will not get any intimation about 1a
* from PF as cmdq would be in unreliable state i.e. mailbox
* communication between PF and VF would be broken.
*/
/* if we are never geting into pending state it means either:
*
* if we are never geting into pending state it means either:
* 1. PF is not receiving our request which could be due to IMP
* reset
* 2. PF is screwed
@@ -2303,7 +2302,7 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
int ret = 0;
int ret;
hclgevf_get_misc_vector(hdev);
......