Commit 5313794b authored by David S. Miller

Merge branch 'hns3-next'

Huazhong Tan says:

====================
net: hns3: add some DFX info for HNS3 driver

This patch set adds some DFX information to the HNS3 driver to make it
easier to debug certain problems, and fixes some related bugs.

[patch 1/12 - 4/12] adds debug info about reset & interrupt events

[patch 5/12 - 7/12] adds debug info about TX timeout & fixes
related bugs

[patch 8/12] adds support for setting the netif message level
(a generic sketch of this idiom follows the commit header below)

[patch 9/12 - 10/12] adds debugfs commands to dump NCL & MAC info

[patch 11/12] adds updating of the VF's queue statistics

[patch 12/12] adds a check to the debugfs help function to decide which
commands are supported
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d7cc399e 97afd47b
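Patch 8/12 follows the standard netdev message level idiom. For orientation, here is a minimal, driver-agnostic sketch of that idiom (the demo_* names are illustrative and not part of this series): a module parameter is folded into a set of default NETIF_MSG_* bits with netif_msg_init(), and the stored msg_enable word then gates log output through the netif_msg_*() helpers.

#include <linux/module.h>
#include <linux/netdevice.h>

/* Illustrative private struct; in this series the msg_enable field is
 * added to struct hnae3_handle instead.
 */
struct demo_priv {
	u32 msg_enable;
};

static int debug = -1;	/* -1 means "use the driver defaults" */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Message level (-1 for defaults)");

#define DEMO_DEFAULT_MSG (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			  NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)

static void demo_init_msg_level(struct demo_priv *priv)
{
	/* combine the module parameter with the default bits */
	priv->msg_enable = netif_msg_init(debug, DEMO_DEFAULT_MSG);
}

static void demo_report_link(struct demo_priv *priv,
			     struct net_device *ndev, bool up)
{
	/* netif_msg_link() tests NETIF_MSG_LINK in priv->msg_enable */
	if (netif_msg_link(priv))
		netdev_info(ndev, "link %s\n", up ? "up" : "down");
}

The ethtool get_msglevel/set_msglevel hooks added further down simply read and write the same msg_enable word.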
......@@ -392,7 +392,8 @@ struct hnae3_ae_ops {
void (*update_stats)(struct hnae3_handle *handle,
struct net_device_stats *net_stats);
void (*get_stats)(struct hnae3_handle *handle, u64 *data);
void (*get_mac_pause_stats)(struct hnae3_handle *handle, u64 *tx_cnt,
u64 *rx_cnt);
void (*get_strings)(struct hnae3_handle *handle,
u32 stringset, u8 *data);
int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
......@@ -589,6 +590,9 @@ struct hnae3_handle {
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
/* Network interface message level enabled bits */
u32 msg_enable;
};
#define hnae3_set_field(origin, mask, shift, val) \
......
......@@ -239,6 +239,10 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "queue info [number]\n");
dev_info(&h->pdev->dev, "queue map\n");
dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n");
if (!hns3_is_phys_func(h->pdev))
return;
dev_info(&h->pdev->dev, "dump fd tcam\n");
dev_info(&h->pdev->dev, "dump tc\n");
dev_info(&h->pdev->dev, "dump tm map [q_num]\n");
......@@ -247,6 +251,9 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump qos pri map\n");
dev_info(&h->pdev->dev, "dump qos buf cfg\n");
dev_info(&h->pdev->dev, "dump mng tbl\n");
dev_info(&h->pdev->dev, "dump reset info\n");
dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n");
dev_info(&h->pdev->dev, "dump mac tnl status\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]",
......@@ -341,6 +348,8 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
ret = hns3_dbg_bd_info(handle, cmd_buf);
else if (handle->ae_algo->ops->dbg_run_cmd)
ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
else
ret = -EOPNOTSUPP;
if (ret)
hns3_dbg_help(handle);
......
......@@ -35,6 +35,13 @@ static const char hns3_driver_string[] =
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Network interface message level setting");
#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
/* hns3_pci_tbl - PCI Device ID Table
*
* Last entry must be all 0s
......@@ -1628,13 +1635,19 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = hns3_get_handle(ndev);
struct hns3_enet_ring *tx_ring = NULL;
struct napi_struct *napi;
int timeout_queue = 0;
int hw_head, hw_tail;
int fbd_num, fbd_oft;
int ebd_num, ebd_oft;
int bd_num, bd_err;
int ring_en, tc;
int i;
/* Find the stopped queue the same way the stack does */
for (i = 0; i < ndev->real_num_tx_queues; i++) {
for (i = 0; i < ndev->num_tx_queues; i++) {
struct netdev_queue *q;
unsigned long trans_start;
......@@ -1655,21 +1668,66 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
return false;
}
priv->tx_timeout_count++;
tx_ring = priv->ring_data[timeout_queue].ring;
napi = &tx_ring->tqp_vector->napi;
netdev_info(ndev,
"tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
tx_ring->next_to_clean, napi->state);
netdev_info(ndev,
"tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
netdev_info(ndev,
"seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
/* When the MAC receives many pause frames continuously, it is unable to
 * send packets, which may cause a TX timeout
 */
if (h->ae_algo->ops->update_stats &&
h->ae_algo->ops->get_mac_pause_stats) {
u64 tx_pause_cnt, rx_pause_cnt;
h->ae_algo->ops->update_stats(h, &ndev->stats);
h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
&rx_pause_cnt);
netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
tx_pause_cnt, rx_pause_cnt);
}
hw_head = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG);
hw_tail = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG);
fbd_num = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_FBDNUM_REG);
fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_OFFSET_REG);
ebd_num = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_EBDNUM_REG);
ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_EBD_OFFSET_REG);
bd_num = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_BD_NUM_REG);
bd_err = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_BD_ERR_REG);
ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
netdev_info(ndev,
"BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
bd_num, hw_head, hw_tail, bd_err,
readl(tx_ring->tqp_vector->mask_addr));
netdev_info(ndev,
"RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
return true;
}
......@@ -1682,8 +1740,6 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
if (!hns3_get_tx_timeo_queue_info(ndev))
return;
priv->tx_timeout_count++;
/* request the reset, and let the hclge to determine
* which reset level should be done
*/
......@@ -1708,7 +1764,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
};
static bool hns3_is_phys_func(struct pci_dev *pdev)
bool hns3_is_phys_func(struct pci_dev *pdev)
{
u32 dev_id = pdev->device;
......@@ -3688,6 +3744,21 @@ static void hns3_client_stop(struct hnae3_handle *handle)
handle->ae_algo->ops->client_stop(handle);
}
static void hns3_info_show(struct hns3_nic_priv *priv)
{
struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
}
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
......@@ -3709,6 +3780,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->tx_timeout_count = 0;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
handle->kinfo.netdev = netdev;
handle->priv = (void *)priv;
......@@ -3775,6 +3848,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
if (netif_msg_drv(handle))
hns3_info_show(priv);
return ret;
out_client_start:
......@@ -3849,11 +3925,13 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
if (linkup) {
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
netdev_info(netdev, "link up\n");
if (netif_msg_link(handle))
netdev_info(netdev, "link up\n");
} else {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
netdev_info(netdev, "link down\n");
if (netif_msg_link(handle))
netdev_info(netdev, "link down\n");
}
}
......
......@@ -42,8 +42,10 @@ enum hns3_nic_state {
#define HNS3_RING_TX_RING_HEAD_REG 0x0005C
#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060
#define HNS3_RING_TX_RING_OFFSET_REG 0x00064
#define HNS3_RING_TX_RING_EBDNUM_REG 0x00068
#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070
#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074
#define HNS3_RING_PREFETCH_EN_REG 0x0007C
#define HNS3_RING_CFG_VF_NUM_REG 0x00080
#define HNS3_RING_ASID_REG 0x0008C
......@@ -661,6 +663,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
bool hns3_is_phys_func(struct pci_dev *pdev);
int hns3_clean_rx_ring(
struct hns3_enet_ring *ring, int budget,
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
......
......@@ -1110,6 +1110,20 @@ static int hns3_set_phys_id(struct net_device *netdev,
return h->ae_algo->ops->set_led_id(h, state);
}
static u32 hns3_get_msglevel(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
return h->msg_enable;
}
static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
h->msg_enable = msg_level;
}
static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_drvinfo = hns3_get_drvinfo,
.get_ringparam = hns3_get_ringparam,
......@@ -1130,6 +1144,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_regs_len = hns3_get_regs_len,
.get_regs = hns3_get_regs,
.get_link = hns3_get_link,
.get_msglevel = hns3_get_msglevel,
.set_msglevel = hns3_set_msglevel,
};
static const struct ethtool_ops hns3_ethtool_ops = {
......@@ -1159,6 +1175,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_regs_len = hns3_get_regs_len,
.get_regs = hns3_get_regs,
.set_phys_id = hns3_set_phys_id,
.get_msglevel = hns3_get_msglevel,
.set_msglevel = hns3_set_msglevel,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
......
......@@ -109,6 +109,9 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
HCLGE_OPC_SERDES_LOOPBACK = 0x0315,
/* PFC/Pause commands */
......@@ -237,6 +240,9 @@ enum hclge_opcode_type {
/* Led command */
HCLGE_OPC_LED_STATUS_CFG = 0xB000,
/* NCL config command */
HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
/* SFP command */
HCLGE_OPC_SFP_GET_SPEED = 0x7104,
......
......@@ -901,6 +901,109 @@ static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
}
}
static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
{
dev_info(&hdev->pdev->dev, "PF reset count: %d\n",
hdev->rst_stats.pf_rst_cnt);
dev_info(&hdev->pdev->dev, "FLR reset count: %d\n",
hdev->rst_stats.flr_rst_cnt);
dev_info(&hdev->pdev->dev, "CORE reset count: %d\n",
hdev->rst_stats.core_rst_cnt);
dev_info(&hdev->pdev->dev, "GLOBAL reset count: %d\n",
hdev->rst_stats.global_rst_cnt);
dev_info(&hdev->pdev->dev, "IMP reset count: %d\n",
hdev->rst_stats.imp_rst_cnt);
dev_info(&hdev->pdev->dev, "reset done count: %d\n",
hdev->rst_stats.reset_done_cnt);
dev_info(&hdev->pdev->dev, "HW reset done count: %d\n",
hdev->rst_stats.hw_reset_done_cnt);
dev_info(&hdev->pdev->dev, "reset count: %d\n",
hdev->rst_stats.reset_cnt);
}
/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
* @hdev: pointer to struct hclge_dev
* @cmd_buf: string that contains offset and length
*/
static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *cmd_buf)
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
#define HCLGE_CMD_DATA_NUM 6
struct hclge_desc desc[5];
u32 byte_offset;
int bd_num = 5;
int offset;
int length;
int data0;
int ret;
int i;
int j;
ret = sscanf(cmd_buf, "%x %x", &offset, &length);
if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
dev_err(&hdev->pdev->dev, "Invalid offset or length.\n");
return;
}
if (offset < 0 || length <= 0) {
dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n");
return;
}
dev_info(&hdev->pdev->dev, "offset | data\n");
while (length > 0) {
data0 = offset;
if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH)
data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16;
else
data0 |= length << 16;
ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
HCLGE_OPC_QUERY_NCL_CONFIG);
if (ret)
return;
byte_offset = offset;
for (i = 0; i < bd_num; i++) {
for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
if (i == 0 && j == 0)
continue;
dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
byte_offset,
le32_to_cpu(desc[i].data[j]));
byte_offset += sizeof(u32);
length -= sizeof(u32);
if (length <= 0)
return;
}
}
offset += HCLGE_MAX_NCL_CONFIG_LENGTH;
}
}
/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
* @hdev: pointer to struct hclge_dev
*/
static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
{
#define HCLGE_BILLION_NANO_SECONDS 1000000000
struct hclge_mac_tnl_stats stats;
unsigned long rem_nsec;
dev_info(&hdev->pdev->dev, "Recently generated mac tnl interruption:\n");
while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
dev_info(&hdev->pdev->dev, "[%07lu.%03lu]status = 0x%x\n",
(unsigned long)stats.time, rem_nsec / 1000,
stats.status);
}
}
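The "[seconds.fraction]" prefix printed above uses the common kernel pattern of splitting a local_clock() nanosecond timestamp with do_div(); a minimal sketch of just that pattern (the demo_* name is illustrative):

#include <linux/device.h>
#include <linux/sched/clock.h>
#include <asm/div64.h>

static void demo_print_timestamp(struct device *dev)
{
	u64 ts = local_clock();	/* nanoseconds since boot */
	unsigned long rem_nsec;

	/* do_div() divides ts in place (ts becomes whole seconds) and
	 * returns the remainder in nanoseconds.
	 */
	rem_nsec = do_div(ts, 1000000000);

	dev_info(dev, "[%5lu.%06lu]\n",
		 (unsigned long)ts, rem_nsec / 1000);
}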
int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
{
struct hclge_vport *vport = hclge_get_vport(handle);
......@@ -924,6 +1027,13 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
hclge_dbg_dump_mng_table(hdev);
} else if (strncmp(cmd_buf, "dump reg", 8) == 0) {
hclge_dbg_dump_reg_cmd(hdev, cmd_buf);
} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
hclge_dbg_dump_rst_info(hdev);
} else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
hclge_dbg_dump_ncl_config(hdev,
&cmd_buf[sizeof("dump ncl_config")]);
} else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
hclge_dbg_dump_mac_tnl_status(hdev);
} else {
dev_info(&hdev->pdev->dev, "unknown command\n");
return -EINVAL;
......
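For reference, the new commands ("dump reset info", "dump ncl_config <offset> <length>", "dump mac tnl status") are issued by writing the command string to the driver's debugfs command file, and the output appears in the kernel log. A minimal userspace sketch follows; the debugfs path shown is an assumption and must be adjusted to the per-device directory the driver actually creates.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path; substitute the real per-device directory */
	const char *path = "/sys/kernel/debug/hns3/0000:7d:00.0/cmd";
	const char *cmd = "dump reset info";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;	/* read the results with dmesg */
}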
......@@ -692,6 +692,16 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
return ret;
}
static int hclge_clear_mac_tnl_int(struct hclge_dev *hdev)
{
struct hclge_desc desc;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_MAC_TNL_INT, false);
desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_CLR);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
......@@ -911,6 +921,21 @@ static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en)
return ret;
}
int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en)
{
struct hclge_desc desc;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_TNL_INT_EN, false);
if (en)
desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN);
else
desc.data[0] = 0;
desc.data[1] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN_MASK);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
bool en)
{
......@@ -1611,6 +1636,7 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests)
{
struct hclge_mac_tnl_stats mac_tnl_stats;
struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
enum hnae3_reset_type reset_level;
......@@ -1745,6 +1771,31 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
set_bit(HNAE3_GLOBAL_RESET, reset_requests);
}
/* query and clear mac tnl interruptions */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT,
true);
ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
if (ret) {
dev_err(dev, "query mac tnl int cmd failed (%d)\n", ret);
goto msi_error;
}
status = le32_to_cpu(desc->data[0]);
if (status) {
/* When a mac tnl interrupt occurs, record the current time and
 * register status in a fifo, then clear the status, so that if
 * the link status changes suddenly at some point we can query
 * the records via debugfs.
 */
mac_tnl_stats.time = local_clock();
mac_tnl_stats.status = status;
kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats);
ret = hclge_clear_mac_tnl_int(hdev);
if (ret)
dev_err(dev, "clear mac tnl int failed (%d)\n", ret);
set_bit(HNAE3_NONE_RESET, reset_requests);
}
msi_error:
kfree(desc);
out:
......
......@@ -47,6 +47,9 @@
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF
#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF
#define HCLGE_MAC_TNL_INT_EN GENMASK(7, 0)
#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(7, 0)
#define HCLGE_MAC_TNL_INT_CLR GENMASK(7, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0)
......@@ -115,6 +118,7 @@ struct hclge_hw_error {
enum hnae3_reset_type reset_level;
};
int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
......
......@@ -699,6 +699,16 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
p = hclge_tqps_get_stats(handle, p);
}
static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
u64 *rx_cnt)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
struct hclge_func_status_cmd *status)
{
......@@ -2238,6 +2248,7 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
handle = &hdev->vport[i].nic;
client->ops->link_status_change(handle, state);
hclge_config_mac_tnl_int(hdev, state);
rhandle = &hdev->vport[i].roce;
if (rclient && rclient->ops->link_status_change)
rclient->ops->link_status_change(rhandle,
......@@ -2360,6 +2371,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
hdev->rst_stats.imp_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
}
......@@ -2368,6 +2380,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
hdev->rst_stats.global_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
}
......@@ -2376,12 +2389,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
hdev->rst_stats.core_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
}
/* check for vector0 msix event source */
if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
msix_src_reg);
return HCLGE_VECTOR0_EVENT_ERR;
}
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
......@@ -2390,6 +2407,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
return HCLGE_VECTOR0_EVENT_MBX;
}
/* print other vector0 event source */
dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
cmdq_src_reg, msix_src_reg);
return HCLGE_VECTOR0_EVENT_OTHER;
}
......@@ -2873,6 +2893,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
* after hclge_cmd_init is called.
*/
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
/* There is no mechanism for PF to know if VF has stopped IO
......@@ -2881,6 +2902,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
msleep(100);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
hdev->rst_stats.flr_rst_cnt++;
break;
case HNAE3_IMP_RESET:
reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
......@@ -2961,7 +2983,7 @@ static void hclge_reset(struct hclge_dev *hdev)
* know if device is undergoing reset
*/
ae_dev->reset_type = hdev->reset_type;
hdev->reset_count++;
hdev->rst_stats.reset_cnt++;
/* perform reset of the stack & ae device for a client */
ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
if (ret)
......@@ -2987,6 +3009,8 @@ static void hclge_reset(struct hclge_dev *hdev)
goto err_reset;
}
hdev->rst_stats.hw_reset_done_cnt++;
ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
if (ret)
goto err_reset;
......@@ -3030,6 +3054,7 @@ static void hclge_reset(struct hclge_dev *hdev)
hdev->last_reset_time = jiffies;
hdev->reset_fail_cnt = 0;
hdev->rst_stats.reset_done_cnt++;
ae_dev->reset_type = HNAE3_NONE_RESET;
del_timer(&hdev->reset_timer);
......@@ -5224,7 +5249,7 @@ static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
return hdev->reset_count;
return hdev->rst_stats.hw_reset_done_cnt;
}
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
......@@ -7530,6 +7555,32 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
*tp_mdix = ETH_TP_MDI;
}
static void hclge_info_show(struct hclge_dev *hdev)
{
struct device *dev = &hdev->pdev->dev;
dev_info(dev, "PF info begin:\n");
dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
dev_info(dev, "MQPRIO %s\n",
hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
dev_info(dev, "PF info end.\n");
}
static int hclge_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
......@@ -7551,6 +7602,9 @@ static int hclge_init_client_instance(struct hnae3_client *client,
hnae3_set_client_init_flag(client, ae_dev, 1);
if (netif_msg_drv(&hdev->vport->nic))
hclge_info_show(hdev);
if (hdev->roce_client &&
hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
......@@ -7910,6 +7964,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
INIT_KFIFO(hdev->mac_tnl_log);
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
......@@ -8063,6 +8119,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);
hclge_config_mac_tnl_int(hdev, false);
hclge_hw_error_set_state(hdev, false);
hclge_cmd_uninit(hdev);
hclge_misc_irq_uninit(hdev);
......@@ -8508,6 +8565,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_mtu = hclge_set_mtu,
.reset_queue = hclge_reset_tqp,
.get_stats = hclge_get_stats,
.get_mac_pause_stats = hclge_get_mac_pause_stat,
.update_stats = hclge_update_stats,
.get_strings = hclge_get_strings,
.get_sset_count = hclge_get_sset_count,
......
......@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/kfifo.h>
#include "hclge_cmd.h"
#include "hnae3.h"
......@@ -649,6 +650,23 @@ struct hclge_vport_vlan_cfg {
u16 vlan_id;
};
struct hclge_rst_stats {
u32 reset_done_cnt; /* the number of completed resets */
u32 hw_reset_done_cnt; /* the number of completed HW resets */
u32 pf_rst_cnt; /* the number of PF resets */
u32 flr_rst_cnt; /* the number of FLR resets */
u32 core_rst_cnt; /* the number of CORE resets */
u32 global_rst_cnt; /* the number of GLOBAL resets */
u32 imp_rst_cnt; /* the number of IMP resets */
u32 reset_cnt; /* the total number of resets */
};
/* time and register status when a mac tunnel interrupt occurs */
struct hclge_mac_tnl_stats {
u64 time;
u32 status;
};
/* For each bit of TCAM entry, it uses a pair of 'x' and
* 'y' to indicate which value to match, like below:
* ----------------------------------
......@@ -675,6 +693,7 @@ struct hclge_vport_vlan_cfg {
(y) = (_k_ ^ ~_v_) & (_k_); \
} while (0)
#define HCLGE_MAC_TNL_LOG_SIZE 8
#define HCLGE_VPORT_NUM 256
struct hclge_dev {
struct pci_dev *pdev;
......@@ -691,7 +710,7 @@ struct hclge_dev {
unsigned long default_reset_request;
unsigned long reset_request; /* reset has been requested */
unsigned long reset_pending; /* client rst is pending to be served */
unsigned long reset_count; /* the number of reset has been done */
struct hclge_rst_stats rst_stats;
u32 reset_fail_cnt;
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
......@@ -791,6 +810,9 @@ struct hclge_dev {
struct mutex umv_mutex; /* protect share_umv_size */
struct mutex vport_cfg_mutex; /* Protect stored vf table */
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
};
/* VPort level vlan tag configuration for TX direction */
......
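The mac_tnl_log member above is built on the generic <linux/kfifo.h> API (DECLARE_KFIFO / INIT_KFIFO / kfifo_put / kfifo_get). A minimal, self-contained sketch of that pattern for readers unfamiliar with it (the demo_* names are illustrative):

#include <linux/kfifo.h>

struct demo_ev {
	u64 time;
	u32 status;
};

#define DEMO_LOG_SIZE 8		/* must be a power of two */

struct demo_dev {
	DECLARE_KFIFO(ev_log, struct demo_ev, DEMO_LOG_SIZE);
};

static void demo_init(struct demo_dev *d)
{
	INIT_KFIFO(d->ev_log);
}

static void demo_record(struct demo_dev *d, u64 time, u32 status)
{
	struct demo_ev ev = { .time = time, .status = status };

	/* silently drops the record if the fifo is full */
	kfifo_put(&d->ev_log, ev);
}

static void demo_drain(struct demo_dev *d)
{
	struct demo_ev ev;

	while (kfifo_get(&d->ev_log, &ev))
		; /* consume/print each record */
}

kfifo_put() simply drops the record when the fifo is full, which keeps the interrupt path non-blocking and is acceptable for a best-effort debug log like this one.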
......@@ -1415,9 +1415,11 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
case HNAE3_VF_FUNC_RESET:
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
0, true, NULL, sizeof(u8));
hdev->rst_stats.vf_func_rst_cnt++;
break;
case HNAE3_FLR_RESET:
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
hdev->rst_stats.flr_rst_cnt++;
break;
default:
break;
......@@ -1440,7 +1442,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
* know if device is undergoing reset
*/
ae_dev->reset_type = hdev->reset_type;
hdev->reset_count++;
hdev->rst_stats.rst_cnt++;
rtnl_lock();
/* bring down the nic to stop any ongoing TX/RX */
......@@ -1466,6 +1468,8 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
goto err_reset;
}
hdev->rst_stats.hw_rst_done_cnt++;
rtnl_lock();
/* now, re-initialize the nic client and ae device*/
......@@ -1484,6 +1488,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
hdev->last_reset_time = jiffies;
ae_dev->reset_type = HNAE3_NONE_RESET;
hdev->rst_stats.rst_done_cnt++;
return ret;
err_reset_lock:
......@@ -1644,6 +1649,7 @@ static void hclgevf_service_timer(struct timer_list *t)
mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
hdev->stats_timer++;
hclgevf_task_schedule(hdev);
}
......@@ -1764,9 +1770,16 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
static void hclgevf_service_task(struct work_struct *work)
{
struct hnae3_handle *handle;
struct hclgevf_dev *hdev;
hdev = container_of(work, struct hclgevf_dev, service_task);
handle = &hdev->nic;
if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
hclgevf_tqps_update_stats(handle);
hdev->stats_timer = 0;
}
/* request the link status from the PF. PF would be able to tell VF
* about such updates in future so we might remove this later
......@@ -1803,6 +1816,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
*clearval = cmdq_src_reg;
hdev->rst_stats.vf_rst_cnt++;
return HCLGEVF_VECTOR0_EVENT_RST;
}
......@@ -2215,6 +2229,23 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
hclgevf_free_vector(hdev, 0);
}
static void hclgevf_info_show(struct hclgevf_dev *hdev)
{
struct device *dev = &hdev->pdev->dev;
dev_info(dev, "VF info begin:\n");
dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
dev_info(dev, "PF media type of this VF: %d\n",
hdev->hw.mac.media_type);
dev_info(dev, "VF info end.\n");
}
static int hclgevf_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
......@@ -2232,6 +2263,9 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
hnae3_set_client_init_flag(client, ae_dev, 1);
if (netif_msg_drv(&hdev->nic))
hclgevf_info_show(hdev);
if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
......@@ -2737,7 +2771,7 @@ static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
return hdev->reset_count;
return hdev->rst_stats.hw_rst_done_cnt;
}
static void hclgevf_get_link_mode(struct hnae3_handle *handle,
......
......@@ -116,6 +116,8 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
#define HCLGEVF_STATS_TIMER_INTERVAL (36)
enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST,
HCLGEVF_VECTOR0_EVENT_MBX,
......@@ -210,6 +212,15 @@ struct hclgevf_misc_vector {
int vector_irq;
};
struct hclgevf_rst_stats {
u32 rst_cnt; /* the number of resets */
u32 vf_func_rst_cnt; /* the number of VF function resets */
u32 flr_rst_cnt; /* the number of FLR resets */
u32 vf_rst_cnt; /* the number of VF resets */
u32 rst_done_cnt; /* the number of completed resets */
u32 hw_rst_done_cnt; /* the number of completed HW resets */
};
struct hclgevf_dev {
struct pci_dev *pdev;
struct hnae3_ae_dev *ae_dev;
......@@ -227,7 +238,7 @@ struct hclgevf_dev {
#define HCLGEVF_RESET_REQUESTED 0
#define HCLGEVF_RESET_PENDING 1
unsigned long reset_state; /* requested, pending */
unsigned long reset_count; /* the number of reset has been done */
struct hclgevf_rst_stats rst_stats;
u32 reset_attempts;
u32 fw_version;
......@@ -272,6 +283,7 @@ struct hclgevf_dev {
struct hnae3_client *nic_client;
struct hnae3_client *roce_client;
u32 flag;
u32 stats_timer;
};
static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
......
......@@ -49,8 +49,8 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (i >= HCLGEVF_MAX_TRY_TIMES) {
dev_err(&hdev->pdev->dev,
"VF could not get mbx resp(=%d) from PF in %d tries\n",
hdev->mbx_resp.received_resp, i);
"VF could not get mbx(%d,%d) resp(=%d) from PF in %d tries\n",
code0, code1, hdev->mbx_resp.received_resp, i);
return -EIO;
}
......@@ -68,8 +68,11 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
dev_err(&hdev->pdev->dev,
"VF could not match resp code(code0=%d,code1=%d), %d",
"VF could not match resp code(code0=%d,code1=%d), %d\n",
code0, code1, mbx_resp->resp_status);
dev_err(&hdev->pdev->dev,
"VF could not match resp r_code(r_code0=%d,r_code1=%d)\n",
r_code0, r_code1);
return -EIO;
}
......