Commit e1c9a0dc authored by Lijun Ou, committed by Jason Gunthorpe

RDMA/hns: Dump detailed driver-specific CQ

This patch adds resource tracking support for hip08 and takes dumping the CQ
context state used for debugging as an example.  Tracking support for more
hns driver resources will be added in the future.

The output should be as follows.
$ rdma res show cq dev hnseth0 -d
dev hnseth0 cqe 1023 users 2 poll-ctx WORKQUEUE pid 0 comm [ib_core] drv_state 2 drv_ceqn 0 drv_cqn 0 drv_hopnum 1 drv_pi 0 drv_ci 0 drv_coalesce 0 drv_period 0 drv_cnt 0
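For scripting, the same driver attributes can also be emitted as JSON; a hedged example, assuming an iproute2 rdma build with JSON support (-j):

$ rdma -j res show cq dev hnseth0 -d

Each drv_* field in the plain output maps to a key the driver nests under RDMA_NLDEV_ATTR_DRIVER in the code below (drv_cqn comes from the "cqn" attribute, drv_pi from "pi", and so on).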
Signed-off-by: Tao Tian <tiantao6@huawei.com>
Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Signed-off-by: chenglang <chenglang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 68e326de
drivers/infiniband/hw/hns/Makefile
@@ -7,8 +7,8 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
 obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
 hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
 	hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
-	hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o
+	hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
 obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
 hns-roce-hw-v1-objs := hns_roce_hw_v1.o
 obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
-hns-roce-hw-v2-objs := hns_roce_hw_v2.o
+hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o
drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -53,6 +53,7 @@ enum {
 	HNS_ROCE_CMD_QUERY_QPC		= 0x42,
 	HNS_ROCE_CMD_MODIFY_CQC	= 0x52,
+	HNS_ROCE_CMD_QUERY_CQC		= 0x53,
 	/* CQC BT commands */
 	HNS_ROCE_CMD_WRITE_CQC_BT0	= 0x10,
 	HNS_ROCE_CMD_WRITE_CQC_BT1	= 0x11,
drivers/infiniband/hw/hns/hns_roce_device.h
@@ -867,6 +867,11 @@ struct hns_roce_work {
 	int			sub_type;
 };
 
+struct hns_roce_dfx_hw {
+	int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
+			      int *buffer);
+};
+
 struct hns_roce_hw {
 	int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
 	int (*cmq_init)(struct hns_roce_dev *hr_dev);
@@ -984,6 +989,7 @@ struct hns_roce_dev {
 	const struct hns_roce_hw	*hw;
 	void				*priv;
 	struct workqueue_struct		*irq_workq;
+	const struct hns_roce_dfx_hw	*dfx;
 };
 
 static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -1196,4 +1202,6 @@ int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
 int hns_roce_init(struct hns_roce_dev *hr_dev);
 void hns_roce_exit(struct hns_roce_dev *hr_dev);
+int hns_roce_fill_res_entry(struct sk_buff *msg,
+			    struct rdma_restrack_entry *res);
 
 #endif /* _HNS_ROCE_DEVICE_H */
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -6068,6 +6068,10 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 	return ret;
 }
 
+static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
+	.query_cqc_info = hns_roce_v2_query_cqc_info,
+};
+
 static const struct ib_device_ops hns_roce_v2_dev_ops = {
 	.destroy_qp = hns_roce_v2_destroy_qp,
 	.modify_cq = hns_roce_v2_modify_cq,
@@ -6140,6 +6144,7 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
 	int i;
 
 	hr_dev->hw = &hns_roce_hw_v2;
+	hr_dev->dfx = &hns_roce_dfx_hw_v2;
 	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
 	hr_dev->odb_offset = hr_dev->sdb_offset;
drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -1799,6 +1799,9 @@ struct hns_roce_sccc_clr_done {
 	__le32 rsv[5];
 };
 
+int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
+			       int *buffer);
+
 static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
 			   void __iomem *dest)
 {
drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c (new file)
// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
// Copyright (c) 2019 Hisilicon Limited.
#include "hnae3.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hw_v2.h"
int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
int *buffer)
{
struct hns_roce_v2_cq_context *cq_context;
struct hns_roce_cmd_mailbox *mailbox;
int ret;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
cq_context = mailbox->buf;
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0,
HNS_ROCE_CMD_QUERY_CQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) {
dev_err(hr_dev->dev, "QUERY cqc cmd process error\n");
goto err_mailbox;
}
memcpy(buffer, cq_context, sizeof(*cq_context));
err_mailbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
return ret;
}
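The mailbox-based query above is the pattern the commit message says will be extended to more resources. As a hedged illustration only (not part of this patch; the function name and its hookup into struct hns_roce_dfx_hw are assumptions), a QP context query could reuse the same flow with the existing HNS_ROCE_CMD_QUERY_QPC command:

int hns_roce_v2_query_qpc_info(struct hns_roce_dev *hr_dev, u32 qpn,
			       int *buffer)
{
	/* Hypothetical sketch: mirrors hns_roce_v2_query_cqc_info() above */
	struct hns_roce_v2_qp_context *qp_context;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	qp_context = mailbox->buf;
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, qpn, 0,
				HNS_ROCE_CMD_QUERY_QPC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret) {
		dev_err(hr_dev->dev, "QUERY qpc cmd process error\n");
		goto err_mailbox;
	}

	/* Copy the raw QPC out so a restrack fill routine can parse its fields */
	memcpy(buffer, qp_context, sizeof(*qp_context));

err_mailbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}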
drivers/infiniband/hw/hns/hns_roce_main.c
@@ -455,6 +455,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
 	.destroy_ah = hns_roce_destroy_ah,
 	.destroy_cq = hns_roce_ib_destroy_cq,
 	.disassociate_ucontext = hns_roce_disassociate_ucontext,
+	.fill_res_entry = hns_roce_fill_res_entry,
 	.get_dma_mr = hns_roce_get_dma_mr,
 	.get_link_layer = hns_roce_get_link_layer,
 	.get_netdev = hns_roce_get_netdev,
drivers/infiniband/hw/hns/hns_roce_restrack.c (new file)
// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
// Copyright (c) 2019 Hisilicon Limited.
#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
static int hns_roce_fill_cq(struct sk_buff *msg,
struct hns_roce_v2_cq_context *context)
{
if (rdma_nl_put_driver_u32(msg, "state",
roce_get_field(context->byte_4_pg_ceqn,
V2_CQC_BYTE_4_ARM_ST_M,
V2_CQC_BYTE_4_ARM_ST_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "ceqn",
roce_get_field(context->byte_4_pg_ceqn,
V2_CQC_BYTE_4_CEQN_M,
V2_CQC_BYTE_4_CEQN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "cqn",
roce_get_field(context->byte_8_cqn,
V2_CQC_BYTE_8_CQN_M,
V2_CQC_BYTE_8_CQN_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "hopnum",
roce_get_field(context->byte_16_hop_addr,
V2_CQC_BYTE_16_CQE_HOP_NUM_M,
V2_CQC_BYTE_16_CQE_HOP_NUM_S)))
goto err;
if (rdma_nl_put_driver_u32(
msg, "pi",
roce_get_field(context->byte_28_cq_pi,
V2_CQC_BYTE_28_CQ_PRODUCER_IDX_M,
V2_CQC_BYTE_28_CQ_PRODUCER_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(
msg, "ci",
roce_get_field(context->byte_32_cq_ci,
V2_CQC_BYTE_32_CQ_CONSUMER_IDX_M,
V2_CQC_BYTE_32_CQ_CONSUMER_IDX_S)))
goto err;
if (rdma_nl_put_driver_u32(
msg, "coalesce",
roce_get_field(context->byte_56_cqe_period_maxcnt,
V2_CQC_BYTE_56_CQ_MAX_CNT_M,
V2_CQC_BYTE_56_CQ_MAX_CNT_S)))
goto err;
if (rdma_nl_put_driver_u32(
msg, "period",
roce_get_field(context->byte_56_cqe_period_maxcnt,
V2_CQC_BYTE_56_CQ_PERIOD_M,
V2_CQC_BYTE_56_CQ_PERIOD_S)))
goto err;
if (rdma_nl_put_driver_u32(msg, "cnt",
roce_get_field(context->byte_52_cqe_cnt,
V2_CQC_BYTE_52_CQE_CNT_M,
V2_CQC_BYTE_52_CQE_CNT_S)))
goto err;
return 0;
err:
return -EMSGSIZE;
}
static int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
struct ib_cq *ib_cq = container_of(res, struct ib_cq, res);
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
struct hns_roce_v2_cq_context *context;
struct nlattr *table_attr;
int ret;
if (!hr_dev->dfx->query_cqc_info)
return -EINVAL;
context = kzalloc(sizeof(struct hns_roce_v2_cq_context), GFP_KERNEL);
if (!context)
return -ENOMEM;
ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)context);
if (ret)
goto err;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
if (hns_roce_fill_cq(msg, context))
goto err_cancel_table;
nla_nest_end(msg, table_attr);
kfree(context);
return 0;
err_cancel_table:
nla_nest_cancel(msg, table_attr);
err:
kfree(context);
return -EMSGSIZE;
}
int hns_roce_fill_res_entry(struct sk_buff *msg,
struct rdma_restrack_entry *res)
{
if (res->type == RDMA_RESTRACK_CQ)
return hns_roce_fill_res_cq_entry(msg, res);
return 0;
}
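Since the commit message notes that more resource types will be tracked later, here is a hedged sketch (not part of this patch; hns_roce_fill_res_qp_entry() is a hypothetical helper) of how the dispatcher above could grow:

int hns_roce_fill_res_entry(struct sk_buff *msg,
			    struct rdma_restrack_entry *res)
{
	/* Hypothetical extension: dispatch per tracked resource type */
	switch (res->type) {
	case RDMA_RESTRACK_CQ:
		return hns_roce_fill_res_cq_entry(msg, res);
	case RDMA_RESTRACK_QP:
		return hns_roce_fill_res_qp_entry(msg, res); /* hypothetical */
	default:
		return 0;
	}
}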