Commit 8c1b4316 authored by Gal Pressman's avatar Gal Pressman Committed by Jason Gunthorpe

RDMA/efa: Split hardware stats to device and port stats

The hardware stats API distinguishes between device and port statistics,
split the EFA stats accordingly instead of always dumping everything.

Link: https://lore.kernel.org/r/20210712105923.17389-1-galpress@amazon.com
Reviewed-by: Firas JahJah <firasj@amazon.com>
Reviewed-by: default avatarYossi Leybovich <sleybo@amazon.com>
Signed-off-by: default avatarGal Pressman <galpress@amazon.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@nvidia.com>
parent 91607118
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
 */
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
...@@ -30,7 +30,21 @@ struct efa_user_mmap_entry { ...@@ -30,7 +30,21 @@ struct efa_user_mmap_entry {
u8 mmap_flag; u8 mmap_flag;
}; };
#define EFA_DEFINE_STATS(op) \ #define EFA_DEFINE_DEVICE_STATS(op) \
op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
op(EFA_COMPLETED_CMDS, "completed_cmds") \
op(EFA_CMDS_ERR, "cmds_err") \
op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
op(EFA_CREATE_QP_ERR, "create_qp_err") \
op(EFA_CREATE_CQ_ERR, "create_cq_err") \
op(EFA_REG_MR_ERR, "reg_mr_err") \
op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
op(EFA_CREATE_AH_ERR, "create_ah_err") \
op(EFA_MMAP_ERR, "mmap_err")
#define EFA_DEFINE_PORT_STATS(op) \
op(EFA_TX_BYTES, "tx_bytes") \ op(EFA_TX_BYTES, "tx_bytes") \
op(EFA_TX_PKTS, "tx_pkts") \ op(EFA_TX_PKTS, "tx_pkts") \
op(EFA_RX_BYTES, "rx_bytes") \ op(EFA_RX_BYTES, "rx_bytes") \
...@@ -44,28 +58,24 @@ struct efa_user_mmap_entry { ...@@ -44,28 +58,24 @@ struct efa_user_mmap_entry {
op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \ op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \ op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \ op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
op(EFA_COMPLETED_CMDS, "completed_cmds") \
op(EFA_CMDS_ERR, "cmds_err") \
op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
op(EFA_CREATE_QP_ERR, "create_qp_err") \
op(EFA_CREATE_CQ_ERR, "create_cq_err") \
op(EFA_REG_MR_ERR, "reg_mr_err") \
op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
op(EFA_CREATE_AH_ERR, "create_ah_err") \
op(EFA_MMAP_ERR, "mmap_err")
#define EFA_STATS_ENUM(ename, name) ename, #define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, name) [ename] = name, #define EFA_STATS_STR(ename, name) [ename] = name,
enum efa_hw_stats { enum efa_hw_device_stats {
EFA_DEFINE_STATS(EFA_STATS_ENUM) EFA_DEFINE_DEVICE_STATS(EFA_STATS_ENUM)
};
static const char *const efa_device_stats_names[] = {
EFA_DEFINE_DEVICE_STATS(EFA_STATS_STR)
};
enum efa_hw_port_stats {
EFA_DEFINE_PORT_STATS(EFA_STATS_ENUM)
}; };
static const char *const efa_stats_names[] = { static const char *const efa_port_stats_names[] = {
EFA_DEFINE_STATS(EFA_STATS_STR) EFA_DEFINE_PORT_STATS(EFA_STATS_STR)
}; };
#define EFA_CHUNK_PAYLOAD_SHIFT 12 #define EFA_CHUNK_PAYLOAD_SHIFT 12
...@@ -1904,33 +1914,53 @@ int efa_destroy_ah(struct ib_ah *ibah, u32 flags) ...@@ -1904,33 +1914,53 @@ int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
return 0; return 0;
} }
struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num)
{ {
return rdma_alloc_hw_stats_struct(efa_stats_names, return rdma_alloc_hw_stats_struct(efa_port_stats_names,
ARRAY_SIZE(efa_stats_names), ARRAY_SIZE(efa_port_stats_names),
RDMA_HW_STATS_DEFAULT_LIFESPAN); RDMA_HW_STATS_DEFAULT_LIFESPAN);
} }
struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev) struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev)
{ {
/* return rdma_alloc_hw_stats_struct(efa_device_stats_names,
* It is probably a bug that efa reports its port stats as device ARRAY_SIZE(efa_device_stats_names),
* stats RDMA_HW_STATS_DEFAULT_LIFESPAN);
*/
return efa_alloc_hw_port_stats(ibdev, 0);
} }
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, static int efa_fill_device_stats(struct efa_dev *dev,
u32 port_num, int index) struct rdma_hw_stats *stats)
{
struct efa_com_stats_admin *as = &dev->edev.aq.stats;
struct efa_stats *s = &dev->stats;
stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
stats->value[EFA_ALLOC_UCONTEXT_ERR] =
atomic64_read(&s->alloc_ucontext_err);
stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
return ARRAY_SIZE(efa_device_stats_names);
}
static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
u32 port_num)
{ {
struct efa_com_get_stats_params params = {}; struct efa_com_get_stats_params params = {};
union efa_com_get_stats_result result; union efa_com_get_stats_result result;
struct efa_dev *dev = to_edev(ibdev);
struct efa_com_rdma_read_stats *rrs; struct efa_com_rdma_read_stats *rrs;
struct efa_com_messages_stats *ms; struct efa_com_messages_stats *ms;
struct efa_com_basic_stats *bs; struct efa_com_basic_stats *bs;
struct efa_com_stats_admin *as;
struct efa_stats *s;
int err; int err;
params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL; params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
...@@ -1969,24 +1999,16 @@ int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, ...@@ -1969,24 +1999,16 @@ int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err; stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes; stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;
as = &dev->edev.aq.stats; return ARRAY_SIZE(efa_port_stats_names);
stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd); }
stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
s = &dev->stats;
stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
stats->value[EFA_ALLOC_UCONTEXT_ERR] =
atomic64_read(&s->alloc_ucontext_err);
stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
return ARRAY_SIZE(efa_stats_names); int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port_num, int index)
{
if (port_num)
return efa_fill_port_stats(to_edev(ibdev), stats, port_num);
else
return efa_fill_device_stats(to_edev(ibdev), stats);
} }
enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev, enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment