Commit fd0fb612 authored by David S. Miller

Merge branch 'hns3-debugfs'

Salil Mehta says:

====================
net: hns3: Adds support of debugfs to HNS3 driver

This patchset adds debugfs support to the HNS3 driver.

Support has been added to query information related to the following items:
1. Queue related ("echo queue info [queue no] > cmd")
2. Flow Director ("echo dump fd tcam > cmd")
3. TC config ("echo dump tc > cmd")
4. Transmit Module/Scheduler ("echo dump tm > cmd")
5. QoS pause ("echo dump qos pause cfg > cmd")
6. QoS priority map ("echo dump qos pri map > cmd")
7. QoS buffer ("echo dump qos buf cfg > cmd")

NOTE: The above commands are *read-only*. They are only intended to
query information from the SoC (and, for now, dump it via the kernel
log) and in no way perform write operations for the purpose of
configuration etc.
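
As an illustration only (not part of the patchset), below is a minimal
userspace sketch that drives one of these commands. It assumes debugfs
is mounted at /sys/kernel/debug, that the root directory is named after
the driver ("hns3", as registered via hns3_dbg_register_debugfs()), and
it uses a placeholder PCI address that must be replaced with the real
device address; the dumped output appears in the kernel log, so read it
back with dmesg.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder PCI address -- substitute the HNS3 device's address. */
	const char *path = "/sys/kernel/debug/hns3/0000:7d:00.0/cmd";
	const char *cmd = "dump tc\n";
	int fd;

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Writing the command triggers the dump; the driver prints the
	 * results via dev_info(), so inspect them with "dmesg".
	 */
	if (write(fd, cmd, strlen(cmd)) != (ssize_t)strlen(cmd))
		perror("write");
	close(fd);
	return 0;
}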

Change Log
----------
V1-->V2:
   * Addressed the comments provided by Jakub Kicinski.
     1. Removed the .rej files mistakenly made part of Flow Director patch.
        Link: https://lkml.org/lkml/2018/11/20/249
     2. Added command summary in the cover letter
        Link: https://lkml.org/lkml/2018/11/22/1
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 53b44cb9 7d9d7f88
@@ -9,6 +9,6 @@ obj-$(CONFIG_HNS3) += hns3vf/
obj-$(CONFIG_HNS3) += hnae3.o
obj-$(CONFIG_HNS3_ENET) += hns3.o
hns3-objs = hns3_enet.o hns3_ethtool.o
hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
@@ -453,6 +453,7 @@ struct hnae3_ae_ops {
struct ethtool_rxnfc *cmd, u32 *rule_locs);
int (*restore_fd_rules)(struct hnae3_handle *handle);
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
int (*dbg_run_cmd)(struct hnae3_handle *handle, char *cmd_buf);
pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
@@ -566,6 +567,7 @@ struct hnae3_handle {
u32 numa_node_mask; /* for multi-chip support */
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
};
#define hnae3_set_field(origin, mask, shift, val) \
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018-2019 Hisilicon Limited. */
#include <linux/debugfs.h>
#include <linux/device.h>
#include "hnae3.h"
#include "hns3_enet.h"
#define HNS3_DBG_READ_LEN 256
static struct dentry *hns3_dbgfs_root;
static int hns3_dbg_queue_info(struct hnae3_handle *h, char *cmd_buf)
{
struct hns3_nic_priv *priv = h->priv;
struct hns3_nic_ring_data *ring_data;
struct hns3_enet_ring *ring;
u32 base_add_l, base_add_h;
u32 queue_num, queue_max;
u32 value, i = 0;
int cnt;
if (!priv->ring_data) {
dev_err(&h->pdev->dev, "ring_data is NULL\n");
return -EFAULT;
}
queue_max = h->kinfo.num_tqps;
cnt = kstrtouint(&cmd_buf[11], 0, &queue_num);
if (cnt)
queue_num = 0;
else
queue_max = queue_num + 1;
dev_info(&h->pdev->dev, "queue info\n");
if (queue_num >= h->kinfo.num_tqps) {
dev_err(&h->pdev->dev,
"Queue number(%u) is out of range(%u)\n", queue_num,
h->kinfo.num_tqps - 1);
return -EINVAL;
}
ring_data = priv->ring_data;
for (i = queue_num; i < queue_max; i++) {
/* Each iteration needs to check whether the instance is being
 * reset, to prevent referencing invalid memory. The following
 * code must also complete within 100ms.
 */
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return -EPERM;
ring = ring_data[i + h->kinfo.num_tqps].ring;
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_H_REG);
base_add_l = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_L_REG);
dev_info(&h->pdev->dev, "RX(%d) BASE ADD: 0x%08x%08x\n", i,
base_add_h, base_add_l);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_NUM_REG);
dev_info(&h->pdev->dev, "RX(%d) RING BD NUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_LEN_REG);
dev_info(&h->pdev->dev, "RX(%d) RING BD LEN: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_TAIL_REG);
dev_info(&h->pdev->dev, "RX(%d) RING TAIL: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_HEAD_REG);
dev_info(&h->pdev->dev, "RX(%d) RING HEAD: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_FBDNUM_REG);
dev_info(&h->pdev->dev, "RX(%d) RING FBDNUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value);
ring = ring_data[i].ring;
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_H_REG);
base_add_l = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_L_REG);
dev_info(&h->pdev->dev, "TX(%d) BASE ADD: 0x%08x%08x\n", i,
base_add_h, base_add_l);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BD_NUM_REG);
dev_info(&h->pdev->dev, "TX(%d) RING BD NUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TC_REG);
dev_info(&h->pdev->dev, "TX(%d) RING TC: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG);
dev_info(&h->pdev->dev, "TX(%d) RING TAIL: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG);
dev_info(&h->pdev->dev, "TX(%d) RING HEAD: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_FBDNUM_REG);
dev_info(&h->pdev->dev, "TX(%d) RING FBDNUM: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_OFFSET_REG);
dev_info(&h->pdev->dev, "TX(%d) RING OFFSET: %u\n", i, value);
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
dev_info(&h->pdev->dev, "TX(%d) RING PKTNUM: %u\n\n", i,
value);
}
return 0;
}
static void hns3_dbg_help(struct hnae3_handle *h)
{
dev_info(&h->pdev->dev, "available commands\n");
dev_info(&h->pdev->dev, "queue info [number]\n");
dev_info(&h->pdev->dev, "dump fd tcam\n");
dev_info(&h->pdev->dev, "dump tc\n");
dev_info(&h->pdev->dev, "dump tm\n");
dev_info(&h->pdev->dev, "dump qos pause cfg\n");
dev_info(&h->pdev->dev, "dump qos pri map\n");
dev_info(&h->pdev->dev, "dump qos buf cfg\n");
}
static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
int uncopy_bytes;
char *buf;
int len;
if (*ppos != 0)
return 0;
if (count < HNS3_DBG_READ_LEN)
return -ENOSPC;
buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
len = snprintf(buf, HNS3_DBG_READ_LEN, "%s\n",
"Please echo help to cmd to get help information");
uncopy_bytes = copy_to_user(buffer, buf, len);
kfree(buf);
if (uncopy_bytes)
return -EFAULT;
return (*ppos = len);
}
static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
size_t count, loff_t *ppos)
{
struct hnae3_handle *handle = filp->private_data;
struct hns3_nic_priv *priv = handle->priv;
char *cmd_buf, *cmd_buf_tmp;
int uncopied_bytes;
int ret = 0;
if (*ppos != 0)
return 0;
/* Check whether the instance is being reset. */
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return 0;
cmd_buf = kzalloc(count + 1, GFP_KERNEL);
if (!cmd_buf)
return count;
uncopied_bytes = copy_from_user(cmd_buf, buffer, count);
if (uncopied_bytes) {
kfree(cmd_buf);
return -EFAULT;
}
cmd_buf[count] = '\0';
cmd_buf_tmp = strchr(cmd_buf, '\n');
if (cmd_buf_tmp) {
*cmd_buf_tmp = '\0';
count = cmd_buf_tmp - cmd_buf + 1;
}
if (strncmp(cmd_buf, "help", 4) == 0)
hns3_dbg_help(handle);
else if (strncmp(cmd_buf, "queue info", 10) == 0)
ret = hns3_dbg_queue_info(handle, cmd_buf);
else if (handle->ae_algo->ops->dbg_run_cmd)
ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
if (ret)
hns3_dbg_help(handle);
kfree(cmd_buf);
cmd_buf = NULL;
return count;
}
static const struct file_operations hns3_dbg_cmd_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = hns3_dbg_cmd_read,
.write = hns3_dbg_cmd_write,
};
void hns3_dbg_init(struct hnae3_handle *handle)
{
const char *name = pci_name(handle->pdev);
struct dentry *pfile;
handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
if (!handle->hnae3_dbgfs)
return;
pfile = debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
&hns3_dbg_cmd_fops);
if (!pfile) {
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
dev_warn(&handle->pdev->dev, "create file for %s fail\n",
name);
}
}
void hns3_dbg_uninit(struct hnae3_handle *handle)
{
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
}
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
{
hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
if (!hns3_dbgfs_root) {
pr_warn("Register debugfs for %s fail\n", debugfs_dir_name);
return;
}
}
void hns3_dbg_unregister_debugfs(void)
{
debugfs_remove_recursive(hns3_dbgfs_root);
hns3_dbgfs_root = NULL;
}
@@ -3620,6 +3620,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
hns3_dcbnl_setup(handle);
hns3_dbg_init(handle);
/* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
netdev->max_mtu = HNS3_MAX_MTU;
@@ -3676,6 +3678,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_put_ring_config(priv);
hns3_dbg_uninit(handle);
priv->ring_data = NULL;
out_netdev_free:
@@ -4245,15 +4249,23 @@ static int __init hns3_init_module(void)
INIT_LIST_HEAD(&client.node);
hns3_dbg_register_debugfs(hns3_driver_name);
ret = hnae3_register_client(&client);
if (ret)
return ret;
goto err_reg_client;
ret = pci_register_driver(&hns3_driver);
if (ret)
hnae3_unregister_client(&client);
goto err_reg_driver;
return ret;
err_reg_driver:
hnae3_unregister_client(&client);
err_reg_client:
hns3_dbg_unregister_debugfs();
return ret;
}
module_init(hns3_init_module);
@@ -4265,6 +4277,7 @@ static void __exit hns3_exit_module(void)
{
pci_unregister_driver(&hns3_driver);
hnae3_unregister_client(&client);
hns3_dbg_unregister_debugfs();
}
module_exit(hns3_exit_module);
@@ -683,4 +683,8 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle);
static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
#endif
void hns3_dbg_init(struct hnae3_handle *handle);
void hns3_dbg_uninit(struct hnae3_handle *handle);
void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
void hns3_dbg_unregister_debugfs(void);
#endif
@@ -6,6 +6,6 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
@@ -126,6 +126,7 @@ enum hclge_opcode_type {
HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
/* Packet buffer allocate commands */
HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018-2019 Hisilicon Limited. */
#include <linux/device.h>
#include "hclge_debugfs.h"
#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"
static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
char *title_buf, char *true_buf,
char *false_buf)
{
if (flag)
dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
true_buf);
else
dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
false_buf);
}
static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
{
struct hclge_ets_tc_weight_cmd *ets_weight;
struct hclge_desc desc;
int i, ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev, "dump tc fail, status is %d.\n", ret);
return;
}
ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "dump tc\n");
dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
ets_weight->weight_offset);
for (i = 0; i < HNAE3_MAX_TC; i++)
hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i,
"tc", "no sp mode", "sp mode");
}
static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
{
struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
enum hclge_opcode_type cmd;
struct hclge_desc desc;
int ret;
cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_pg_cmd_send;
pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
pg_shap_cfg_cmd->pg_shapping_para);
cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_pg_cmd_send;
pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
pg_shap_cfg_cmd->pg_shapping_para);
cmd = HCLGE_OPC_TM_PORT_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_pg_cmd_send;
port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
port_shap_cfg_cmd->port_shapping_para);
cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_pg_cmd_send;
dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]);
cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_pg_cmd_send;
dev_info(&hdev->pdev->dev, "PRI_SCH pg_id: %u\n", desc.data[0]);
cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_pg_cmd_send;
dev_info(&hdev->pdev->dev, "QS_SCH pg_id: %u\n", desc.data[0]);
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_pg_cmd_send;
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
bp_to_qs_map_cmd->tc_id);
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
bp_to_qs_map_cmd->qs_group_id);
dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
bp_to_qs_map_cmd->qs_bit_map);
return;
err_tm_pg_cmd_send:
dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), status is %d\n",
cmd, ret);
}
static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
{
struct hclge_priority_weight_cmd *priority_weight;
struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
struct hclge_pri_shapping_cmd *shap_cfg_cmd;
struct hclge_pg_weight_cmd *pg_weight;
struct hclge_qs_weight_cmd *qs_weight;
enum hclge_opcode_type cmd;
struct hclge_desc desc;
int ret;
cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_cmd_send;
pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "dump tm\n");
dev_info(&hdev->pdev->dev, "PG_TO_PRI pg_id: %u\n",
pg_to_pri_map->pg_id);
dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
pg_to_pri_map->pri_bit_map);
cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_cmd_send;
qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
qs_to_pri_map->qs_id);
dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
qs_to_pri_map->priority);
dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
qs_to_pri_map->link_vld);
cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_cmd_send;
nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n",
nq_to_qs_map->qset_id);
cmd = HCLGE_OPC_TM_PG_WEIGHT;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_cmd_send;
pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);
cmd = HCLGE_OPC_TM_QS_WEIGHT;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_cmd_send;
qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id);
dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);
cmd = HCLGE_OPC_TM_PRI_WEIGHT;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_cmd_send;
priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);
cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_cmd_send;
shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
shap_cfg_cmd->pri_shapping_para);
cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_cmd_send;
shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
shap_cfg_cmd->pri_shapping_para);
hclge_dbg_dump_tm_pg(hdev);
return;
err_tm_cmd_send:
dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), status is %d\n",
cmd, ret);
}
static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
{
struct hclge_cfg_pause_param_cmd *pause_param;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev, "dump qos pause cfg fail, status is %d.\n",
ret);
return;
}
pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
pause_param->pause_trans_gap);
dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
pause_param->pause_trans_time);
}
static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
{
struct hclge_qos_pri_map_cmd *pri_map;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"dump qos pri map fail, status is %d.\n", ret);
return;
}
pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "dump qos pri map\n");
dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc);
dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc);
dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc);
dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc);
dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc);
dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc);
dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc);
dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
}
static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
struct hclge_rx_priv_wl_buf *rx_priv_wl;
struct hclge_rx_com_wl *rx_packet_cnt;
struct hclge_rx_com_thrd *rx_com_thrd;
struct hclge_rx_com_wl *rx_com_wl;
enum hclge_opcode_type cmd;
struct hclge_desc desc[2];
int i, ret;
cmd = HCLGE_OPC_TX_BUFF_ALLOC;
hclge_cmd_setup_basic_desc(desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, desc, 1);
if (ret)
goto err_qos_cmd_send;
dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
tx_buf_cmd->tx_pkt_buff[i]);
cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
hclge_cmd_setup_basic_desc(desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, desc, 1);
if (ret)
goto err_qos_cmd_send;
dev_info(&hdev->pdev->dev, "\n");
rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
rx_buf_cmd->buf_num[i]);
dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
rx_buf_cmd->shared_buf);
cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret)
goto err_qos_cmd_send;
dev_info(&hdev->pdev->dev, "\n");
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret)
goto err_qos_cmd_send;
dev_info(&hdev->pdev->dev, "\n");
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
rx_com_thrd->com_thrd[i].high,
rx_com_thrd->com_thrd[i].low);
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
rx_com_thrd->com_thrd[i].high,
rx_com_thrd->com_thrd[i].low);
cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
hclge_cmd_setup_basic_desc(desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, desc, 1);
if (ret)
goto err_qos_cmd_send;
rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
dev_info(&hdev->pdev->dev, "\n");
dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
hclge_cmd_setup_basic_desc(desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, desc, 1);
if (ret)
goto err_qos_cmd_send;
rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
dev_info(&hdev->pdev->dev,
"rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
return;
err_qos_cmd_send:
dev_err(&hdev->pdev->dev,
"dump qos buf cfg fail(0x%x), status is %d\n", cmd, ret);
}
static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
bool sel_x, u32 loc)
{
struct hclge_fd_tcam_config_1_cmd *req1;
struct hclge_fd_tcam_config_2_cmd *req2;
struct hclge_fd_tcam_config_3_cmd *req3;
struct hclge_desc desc[3];
int ret, i;
u32 *req;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
req1->stage = stage;
req1->xy_sel = sel_x ? 1 : 0;
req1->index = cpu_to_le32(loc);
ret = hclge_cmd_send(&hdev->hw, desc, 3);
if (ret)
return;
dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
sel_x ? "x" : "y", loc);
req = (u32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
req = (u32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
req = (u32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
}
static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
{
u32 i;
for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) {
hclge_dbg_fd_tcam_read(hdev, 0, true, i);
hclge_dbg_fd_tcam_read(hdev, 0, false, i);
}
}
int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) {
hclge_dbg_fd_tcam(hdev);
} else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
hclge_dbg_dump_tc(hdev);
} else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
hclge_dbg_dump_tm(hdev);
} else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
hclge_dbg_dump_qos_pause_cfg(hdev);
} else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) {
hclge_dbg_dump_qos_pri_map(hdev);
} else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) {
hclge_dbg_dump_qos_buf_cfg(hdev);
} else {
dev_info(&hdev->pdev->dev, "unknown command\n");
return -EINVAL;
}
return 0;
}
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2018-2019 Hisilicon Limited. */
#ifndef __HCLGE_DEBUGFS_H
#define __HCLGE_DEBUGFS_H
#pragma pack(1)
struct hclge_qos_pri_map_cmd {
u8 pri0_tc : 4,
pri1_tc : 4;
u8 pri2_tc : 4,
pri3_tc : 4;
u8 pri4_tc : 4,
pri5_tc : 4;
u8 pri6_tc : 4,
pri7_tc : 4;
u8 vlan_pri : 4,
rev : 4;
};
#pragma pack()
#endif
@@ -7826,6 +7826,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_all_rules = hclge_get_all_rules,
.restore_fd_rules = hclge_restore_fd_entries,
.enable_fd = hclge_enable_fd,
.dbg_run_cmd = hclge_dbg_run_cmd,
.process_hw_error = hclge_process_ras_hw_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
.ae_dev_resetting = hclge_ae_dev_resetting,
@@ -814,4 +814,5 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
#endif
@@ -55,6 +55,12 @@ struct hclge_qs_weight_cmd {
u8 dwrr;
};
struct hclge_ets_tc_weight_cmd {
u8 tc_weight[HNAE3_MAX_TC];
u8 weight_offset;
u8 rsvd[15];
};
#define HCLGE_TM_SHAP_IR_B_MSK GENMASK(7, 0)
#define HCLGE_TM_SHAP_IR_B_LSH 0
#define HCLGE_TM_SHAP_IR_U_MSK GENMASK(11, 8)