Commit 01599847 authored by David S. Miller

Merge branch 'hns3-next'

Huazhong Tan says:

====================
net: hns3: misc updates for -net-next

This series includes some misc updates for the HNS3 ethernet driver.

[patch 1] rate-limits the error log in hns3_clean_tx_ring(), so a
bad head pointer cannot flood the kernel log (see the sketch after
the commit header below).
[patch 2] adds a check of the pfmemalloc flag before reusing pages,
since pages taken from the pfmemalloc reserves are meant for
memory-reclaim traffic and should not be recycled as normal RX
buffers.
[patch 3] assigns a default reset type 'HNAE3_NONE_RESET' to the
VF's reset_type after initialization and after a reset completes.
[patch 4] moves the HCLGE_DFX_REG_TYPE_CNT macro definition into
the header file so all users share a single definition.
[patch 5] refines the 'size' parameter passed to snprintf() in
hns3_init_module() (also illustrated after the commit header).
[patch 6] rewrites a debug message in hclge_put_vector().
[patch 7~9] add some cleanups related to coding style.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 953c4a08 60df7e91
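Two short sketches follow to illustrate the reasoning behind patch 1
and patch 5; both are written for this summary and are not code taken
from the series.

For patch 1, the "wrong head" print is switched from netdev_err() to a
rate-limited helper. The macro below is a hypothetical stand-in
(example_rl_err is not the driver's real hns3_rl_err) showing the
common pattern built on the kernel's net_ratelimit() throttle:

    #include <linux/net.h>          /* net_ratelimit() */
    #include <linux/netdevice.h>    /* netdev_err() */

    /* Hypothetical helper: suppress repeated error prints that arrive
     * in a burst, so a queue that keeps reporting a bad head pointer
     * on every poll cannot flood the log.
     */
    #define example_rl_err(ndev, fmt, ...)                         \
            do {                                                   \
                    if (net_ratelimit())                           \
                            netdev_err(ndev, fmt, ##__VA_ARGS__);  \
            } while (0)

For patch 5, snprintf() already counts the terminating NUL inside the
size it is given, so passing HNAE3_CLIENT_NAME_LENGTH instead of
HNAE3_CLIENT_NAME_LENGTH - 1 lets the client name use the whole buffer
while the result stays NUL-terminated. A minimal user-space sketch of
the same semantics (buffer size and string chosen only for
illustration):

    #include <stdio.h>

    int main(void)
    {
            char name[8];

            /* The size argument includes room for the '\0': at most 7
             * visible characters are stored and the output is always
             * terminated, so passing sizeof(name) - 1 would only waste
             * the last usable byte.
             */
            snprintf(name, sizeof(name), "%s", "hns3-example");
            printf("%s\n", name);   /* prints "hns3-ex" */
            return 0;
    }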
@@ -176,7 +176,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
 		return -EINVAL;
 	}
 
 	ring = &priv->ring[q_num];
 	value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
 	tx_index = (cnt == 1) ? value : tx_index;
@@ -209,10 +209,10 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
 		 le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri));
 	dev_info(dev, "(TX)mss: %u\n", le16_to_cpu(tx_desc->tx.mss));
 
 	ring = &priv->ring[q_num + h->kinfo.num_tqps];
 	value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
 	rx_index = (cnt == 1) ? value : tx_index;
 	rx_desc = &ring->desc[rx_index];
 
 	addr = le64_to_cpu(rx_desc->addr);
 	dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
...
@@ -2094,10 +2094,8 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	int ret;
 
 	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
-	if (!ae_dev) {
-		ret = -ENOMEM;
-		return ret;
-	}
+	if (!ae_dev)
+		return -ENOMEM;
 
 	ae_dev->pdev = pdev;
 	ae_dev->flag = ent->driver_data;
@@ -2540,8 +2538,8 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
 	rmb(); /* Make sure head is ready before touch any data */
 
 	if (unlikely(!is_valid_clean_head(ring, head))) {
-		netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
-			   ring->next_to_use, ring->next_to_clean);
+		hns3_rl_err(netdev, "wrong head (%d, %d-%d)\n", head,
+			    ring->next_to_use, ring->next_to_clean);
 
 		u64_stats_update_begin(&ring->syncp);
 		ring->stats.io_err_cnt++;
@@ -2627,6 +2625,12 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
 }
 
+static bool hns3_page_is_reusable(struct page *page)
+{
+	return page_to_nid(page) == numa_mem_id() &&
+		!page_is_pfmemalloc(page);
+}
+
 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 				struct hns3_enet_ring *ring, int pull_len,
 				struct hns3_desc_cb *desc_cb)
@@ -2641,7 +2645,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	/* Avoid re-using remote pages, or the stack is still using the page
 	 * when page_offset rollback to zero, flag default unreuse
 	 */
-	if (unlikely(page_to_nid(desc_cb->priv) != numa_mem_id()) ||
+	if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
 	    (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
 		return;
@@ -2860,7 +2864,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 	memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
 
 	/* We can reuse buffer as-is, just make sure it is local */
-	if (likely(page_to_nid(desc_cb->priv) == numa_mem_id()))
+	if (likely(hns3_page_is_reusable(desc_cb->priv)))
 		desc_cb->reuse_flag = 1;
 	else /* This page cannot be reused so discard it */
 		put_page(desc_cb->priv);
@@ -4710,7 +4714,7 @@ static int __init hns3_init_module(void)
 	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
 
 	client.type = HNAE3_CLIENT_KNIC;
-	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
+	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
 		 hns3_driver_name);
 
 	client.ops = &client_ops;
...
@@ -73,8 +73,6 @@ static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
 
 static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
 {
-#define HCLGE_GET_DFX_REG_TYPE_CNT	4
-
 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
 	int entries_per_desc;
 	int index;
...
@@ -1898,10 +1898,8 @@ static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
 	bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
 
 	desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
-	if (!desc) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!desc)
+		return -ENOMEM;
 
 	ret = hclge_handle_mpf_msix_error(hdev, desc, mpf_bd_num,
 					  reset_requests);
...
@@ -862,9 +862,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
 		usleep_range(1000, 2000);
 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
 
-	ret = hclge_parse_func_status(hdev, req);
-
-	return ret;
+	return hclge_parse_func_status(hdev, req);
 }
 
 static int hclge_query_pf_resource(struct hclge_dev *hdev)
@@ -882,12 +880,12 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 	}
 
 	req = (struct hclge_pf_res_cmd *)desc.data;
-	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
-	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
+	hdev->num_tqps = le16_to_cpu(req->tqp_num);
+	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
 
 	if (req->tx_buf_size)
 		hdev->tx_buf_size =
-			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
+			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
 	else
 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
@@ -895,7 +893,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 	if (req->dv_buf_size)
 		hdev->dv_buf_size =
-			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
+			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
 	else
 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
@@ -903,10 +901,10 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 	if (hnae3_dev_roce_supported(hdev)) {
 		hdev->roce_base_msix_offset =
-		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
 		hdev->num_roce_msi =
-		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
 
 		/* nic's msix numbers is always equals to the roce's. */
@@ -919,7 +917,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 			hdev->roce_base_msix_offset;
 	} else {
 		hdev->num_msi =
-		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
+		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
 
 		hdev->num_nic_msi = hdev->num_msi;
@@ -1333,11 +1331,7 @@ static int hclge_get_cap(struct hclge_dev *hdev)
 	}
 
 	/* get pf resource */
-	ret = hclge_query_pf_resource(hdev);
-	if (ret)
-		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
-
-	return ret;
+	return hclge_query_pf_resource(hdev);
 }
 
 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
@@ -2621,30 +2615,21 @@ static int hclge_mac_init(struct hclge_dev *hdev)
 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
 					 hdev->hw.mac.duplex);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"Config mac speed dup fail ret=%d\n", ret);
+	if (ret)
 		return ret;
-	}
 
 	if (hdev->hw.mac.support_autoneg) {
 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
-		if (ret) {
-			dev_err(&hdev->pdev->dev,
-				"Config mac autoneg fail ret=%d\n", ret);
+		if (ret)
 			return ret;
-		}
 	}
 
 	mac->link = 0;
 
 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
-		if (ret) {
-			dev_err(&hdev->pdev->dev,
-				"Fec mode init fail, ret = %d\n", ret);
+		if (ret)
 			return ret;
-		}
 	}
 
 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
@@ -2916,7 +2901,7 @@ static int hclge_get_status(struct hnae3_handle *handle)
 
 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
 {
-	if (pci_num_vf(hdev->pdev) == 0) {
+	if (!pci_num_vf(hdev->pdev)) {
 		dev_err(&hdev->pdev->dev,
 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
 		return NULL;
@@ -4101,7 +4086,7 @@ static int hclge_put_vector(struct hnae3_handle *handle, int vector)
 	vector_id = hclge_get_vector_index(hdev, vector);
 	if (vector_id < 0) {
 		dev_err(&hdev->pdev->dev,
-			"Get vector index fail. vector_id =%d\n", vector_id);
+			"Get vector index fail. vector = %d\n", vector);
 		return vector_id;
 	}
@@ -6584,7 +6569,7 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
 	hclge_cfg_mac_mode(hdev, en);
 
-	ret = hclge_mac_phy_link_status_wait(hdev, en, FALSE);
+	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
 	if (ret)
 		dev_err(&hdev->pdev->dev,
 			"serdes loopback config mac mode timeout\n");
@@ -6642,7 +6627,7 @@ static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
 	hclge_cfg_mac_mode(hdev, en);
 
-	ret = hclge_mac_phy_link_status_wait(hdev, en, TRUE);
+	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
 	if (ret)
 		dev_err(&hdev->pdev->dev,
 			"phy loopback config mac mode timeout\n");
@@ -9394,17 +9379,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	sema_init(&hdev->reset_sem, 1);
 
 	ret = hclge_pci_init(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "PCI init failed\n");
+	if (ret)
 		goto out;
-	}
 
 	/* Firmware command queue initialize */
 	ret = hclge_cmd_queue_init(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
+	if (ret)
 		goto err_pci_uninit;
-	}
 
 	/* Firmware command initialize */
 	ret = hclge_cmd_init(hdev);
@@ -9412,11 +9393,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		goto err_cmd_uninit;
 
 	ret = hclge_get_cap(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
-			ret);
+	if (ret)
 		goto err_cmd_uninit;
-	}
 
 	ret = hclge_configure(hdev);
 	if (ret) {
@@ -9431,12 +9409,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	}
 
 	ret = hclge_misc_irq_init(hdev);
-	if (ret) {
-		dev_err(&pdev->dev,
-			"Misc IRQ(vector0) init error, ret = %d.\n",
-			ret);
+	if (ret)
 		goto err_msi_uninit;
-	}
 
 	ret = hclge_alloc_tqps(hdev);
 	if (ret) {
@@ -9445,31 +9419,22 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	}
 
 	ret = hclge_alloc_vport(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
+	if (ret)
 		goto err_msi_irq_uninit;
-	}
 
 	ret = hclge_map_tqp(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
+	if (ret)
 		goto err_msi_irq_uninit;
-	}
 
 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
 		ret = hclge_mac_mdio_config(hdev);
-		if (ret) {
-			dev_err(&hdev->pdev->dev,
-				"mdio config fail ret=%d\n", ret);
+		if (ret)
 			goto err_msi_irq_uninit;
-		}
 	}
 
 	ret = hclge_init_umv_space(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
+	if (ret)
 		goto err_mdiobus_unreg;
-	}
 
 	ret = hclge_mac_init(hdev);
 	if (ret) {
@@ -10204,10 +10169,8 @@ static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
 				    int *bd_num_list,
 				    u32 type_num)
 {
-#define HCLGE_DFX_REG_BD_NUM	4
-
 	u32 entries_per_desc, desc_index, index, offset, i;
-	struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
+	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
 	int ret;
 
 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
@@ -10320,10 +10283,8 @@ static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
 	buf_len = sizeof(*desc_src) * bd_num_max;
 	desc_src = kzalloc(buf_len, GFP_KERNEL);
-	if (!desc_src) {
-		dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
+	if (!desc_src)
 		return -ENOMEM;
-	}
 
 	for (i = 0; i < dfx_reg_type_num; i++) {
 		bd_num = bd_num_list[i];
...
@@ -139,6 +139,8 @@
 #define HCLGE_PHY_MDIX_STATUS_B		6
 #define HCLGE_PHY_SPEED_DUP_RESOLVE_B	11
 
+#define HCLGE_GET_DFX_REG_TYPE_CNT	4
+
 /* Factor used to calculate offset and bitmap of VF num */
 #define HCLGE_VF_NUM_PER_CMD		64
...
@@ -1316,14 +1316,13 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
 	msg_data[0] = is_kill;
 	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
 	memcpy(&msg_data[3], &proto, sizeof(proto));
-	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
-				   HCLGE_MBX_VLAN_FILTER, msg_data,
-				   HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);
-
 	/* when remove hw vlan filter failed, record the vlan id,
 	 * and try to remove it from hw later, to be consistence
 	 * with stack.
 	 */
+	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+				   HCLGE_MBX_VLAN_FILTER, msg_data,
+				   HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0);
 	if (is_kill && ret)
 		set_bit(vlan_id, hdev->vlan_del_fail_bmap);
@@ -1862,6 +1861,7 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
 		hclgevf_reset_task_schedule(hdev);
 	}
 
+	hdev->reset_type = HNAE3_NONE_RESET;
 	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
 	up(&hdev->reset_sem);
 }
@@ -2597,11 +2597,11 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
 	if (hnae3_dev_roce_supported(hdev)) {
 		hdev->roce_base_msix_offset =
-		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
+		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
 				HCLGEVF_MSIX_OFT_ROCEE_M,
 				HCLGEVF_MSIX_OFT_ROCEE_S);
 		hdev->num_roce_msix =
-		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
 
 		/* nic's msix numbers is always equals to the roce's. */
@@ -2614,7 +2614,7 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
 			hdev->roce_base_msix_offset;
 	} else {
 		hdev->num_msi =
-		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
+		hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
 				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
 
 		hdev->num_nic_msix = hdev->num_msi;
@@ -2711,16 +2711,12 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 	int ret;
 
 	ret = hclgevf_pci_init(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "PCI initialization failed\n");
+	if (ret)
 		return ret;
-	}
 
 	ret = hclgevf_cmd_queue_init(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
+	if (ret)
 		goto err_cmd_queue_init;
-	}
 
 	ret = hclgevf_cmd_init(hdev);
 	if (ret)
@@ -2728,11 +2724,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 	/* Get vf resource */
 	ret = hclgevf_query_vf_resource(hdev);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"Query vf status error, ret = %d.\n", ret);
+	if (ret)
 		goto err_cmd_init;
-	}
 
 	ret = hclgevf_init_msi(hdev);
 	if (ret) {
@@ -2742,13 +2735,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 	hclgevf_state_init(hdev);
 	hdev->reset_level = HNAE3_VF_FUNC_RESET;
+	hdev->reset_type = HNAE3_NONE_RESET;
 
 	ret = hclgevf_misc_irq_init(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
-			ret);
+	if (ret)
 		goto err_misc_irq_init;
-	}
 
 	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
@@ -2765,10 +2756,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 	}
 
 	ret = hclgevf_set_handle_info(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
+	if (ret)
 		goto err_config;
-	}
 
 	ret = hclgevf_config_gro(hdev, true);
 	if (ret)
...