Commit 8c40602b authored by Guvenc Gulce's avatar Guvenc Gulce Committed by David S. Miller

net/smc: Add netlink support for SMC statistics

Add the netlink function which collects the statistics information and
delivers it to the userspace.
Signed-off-by: Guvenc Gulce <guvenc@linux.ibm.com>
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e0e4b8fa
...@@ -47,6 +47,7 @@ enum { ...@@ -47,6 +47,7 @@ enum {
SMC_NETLINK_GET_LGR_SMCD, SMC_NETLINK_GET_LGR_SMCD,
SMC_NETLINK_GET_DEV_SMCD, SMC_NETLINK_GET_DEV_SMCD,
SMC_NETLINK_GET_DEV_SMCR, SMC_NETLINK_GET_DEV_SMCR,
SMC_NETLINK_GET_STATS,
}; };
/* SMC_GENL_FAMILY top level attributes */ /* SMC_GENL_FAMILY top level attributes */
...@@ -58,6 +59,7 @@ enum { ...@@ -58,6 +59,7 @@ enum {
SMC_GEN_LGR_SMCD, /* nest */ SMC_GEN_LGR_SMCD, /* nest */
SMC_GEN_DEV_SMCD, /* nest */ SMC_GEN_DEV_SMCD, /* nest */
SMC_GEN_DEV_SMCR, /* nest */ SMC_GEN_DEV_SMCR, /* nest */
SMC_GEN_STATS, /* nest */
__SMC_GEN_MAX, __SMC_GEN_MAX,
SMC_GEN_MAX = __SMC_GEN_MAX - 1 SMC_GEN_MAX = __SMC_GEN_MAX - 1
}; };
...@@ -159,4 +161,71 @@ enum { ...@@ -159,4 +161,71 @@ enum {
SMC_NLA_DEV_MAX = __SMC_NLA_DEV_MAX - 1 SMC_NLA_DEV_MAX = __SMC_NLA_DEV_MAX - 1
}; };
/* SMC_NLA_STATS_T_TX(RX)_RMB_SIZE nested attributes */
/* SMC_NLA_STATS_TX(RX)PLOAD_SIZE nested attributes */
/* Size-class histogram: one u64 counter per buffer/payload size bucket,
 * from 8K up to "greater than 1024K". Part of the uAPI - existing values
 * must never be renumbered; new buckets go before __SMC_NLA_STATS_PLOAD_MAX.
 */
enum {
	SMC_NLA_STATS_PLOAD_PAD,	/* attribute 0 reserved for 64-bit alignment padding */
	SMC_NLA_STATS_PLOAD_8K,		/* u64 */
	SMC_NLA_STATS_PLOAD_16K,	/* u64 */
	SMC_NLA_STATS_PLOAD_32K,	/* u64 */
	SMC_NLA_STATS_PLOAD_64K,	/* u64 */
	SMC_NLA_STATS_PLOAD_128K,	/* u64 */
	SMC_NLA_STATS_PLOAD_256K,	/* u64 */
	SMC_NLA_STATS_PLOAD_512K,	/* u64 */
	SMC_NLA_STATS_PLOAD_1024K,	/* u64 */
	SMC_NLA_STATS_PLOAD_G_1024K,	/* u64, everything larger than 1024K */
	__SMC_NLA_STATS_PLOAD_MAX,
	SMC_NLA_STATS_PLOAD_MAX = __SMC_NLA_STATS_PLOAD_MAX - 1
};
/* SMC_NLA_STATS_T_TX(RX)_RMB_STATS nested attributes */
/* Per-direction RMB (remote memory buffer) bookkeeping counters;
 * every entry is emitted as a u64 attribute. Part of the uAPI -
 * values must never be renumbered.
 */
enum {
	SMC_NLA_STATS_RMB_PAD,	/* attribute 0 reserved for 64-bit alignment padding */
	SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT,	/* u64 */
	SMC_NLA_STATS_RMB_SIZE_SM_CNT,		/* u64 */
	SMC_NLA_STATS_RMB_FULL_PEER_CNT,	/* u64 */
	SMC_NLA_STATS_RMB_FULL_CNT,		/* u64 */
	SMC_NLA_STATS_RMB_REUSE_CNT,		/* u64 */
	SMC_NLA_STATS_RMB_ALLOC_CNT,		/* u64 */
	SMC_NLA_STATS_RMB_DGRADE_CNT,		/* u64 */
	__SMC_NLA_STATS_RMB_MAX,
	SMC_NLA_STATS_RMB_MAX = __SMC_NLA_STATS_RMB_MAX - 1
};
/* SMC_NLA_STATS_SMCD_TECH and _SMCR_TECH nested attributes */
/* Per-transport (SMC-D / SMC-R) statistics: four size-histogram nests,
 * two RMB counter nests, and u64 connection/traffic counters.
 * Part of the uAPI - values must never be renumbered.
 */
enum {
	SMC_NLA_STATS_T_PAD,	/* attribute 0 reserved for 64-bit alignment padding */
	SMC_NLA_STATS_T_TX_RMB_SIZE,	/* nest */
	SMC_NLA_STATS_T_RX_RMB_SIZE,	/* nest */
	SMC_NLA_STATS_T_TXPLOAD_SIZE,	/* nest */
	SMC_NLA_STATS_T_RXPLOAD_SIZE,	/* nest */
	SMC_NLA_STATS_T_TX_RMB_STATS,	/* nest */
	SMC_NLA_STATS_T_RX_RMB_STATS,	/* nest */
	SMC_NLA_STATS_T_CLNT_V1_SUCC,	/* u64 */
	SMC_NLA_STATS_T_CLNT_V2_SUCC,	/* u64 */
	SMC_NLA_STATS_T_SRV_V1_SUCC,	/* u64 */
	SMC_NLA_STATS_T_SRV_V2_SUCC,	/* u64 */
	SMC_NLA_STATS_T_SENDPAGE_CNT,	/* u64 */
	SMC_NLA_STATS_T_SPLICE_CNT,	/* u64 */
	SMC_NLA_STATS_T_CORK_CNT,	/* u64 */
	SMC_NLA_STATS_T_NDLY_CNT,	/* u64 */
	SMC_NLA_STATS_T_URG_DATA_CNT,	/* u64 */
	SMC_NLA_STATS_T_RX_BYTES,	/* u64 */
	SMC_NLA_STATS_T_TX_BYTES,	/* u64 */
	SMC_NLA_STATS_T_RX_CNT,		/* u64 */
	SMC_NLA_STATS_T_TX_CNT,		/* u64 */
	__SMC_NLA_STATS_T_MAX,
	SMC_NLA_STATS_T_MAX = __SMC_NLA_STATS_T_MAX - 1
};
/* SMC_GEN_STATS attributes */
/* Top level of an SMC_NETLINK_GET_STATS reply: one nest per transport
 * plus global handshake-error counters. Part of the uAPI - values
 * must never be renumbered.
 */
enum {
	SMC_NLA_STATS_PAD,	/* attribute 0 reserved for 64-bit alignment padding */
	SMC_NLA_STATS_SMCD_TECH,	/* nest */
	SMC_NLA_STATS_SMCR_TECH,	/* nest */
	SMC_NLA_STATS_CLNT_HS_ERR_CNT,	/* u64 */
	SMC_NLA_STATS_SRV_HS_ERR_CNT,	/* u64 */
	__SMC_NLA_STATS_MAX,
	SMC_NLA_STATS_MAX = __SMC_NLA_STATS_MAX - 1
};
#endif /* _UAPI_LINUX_SMC_H */ #endif /* _UAPI_LINUX_SMC_H */
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include "smc_core.h" #include "smc_core.h"
#include "smc_ism.h" #include "smc_ism.h"
#include "smc_ib.h" #include "smc_ib.h"
#include "smc_stats.h"
#include "smc_netlink.h" #include "smc_netlink.h"
#define SMC_CMD_MAX_ATTR 1 #define SMC_CMD_MAX_ATTR 1
...@@ -55,6 +56,11 @@ static const struct genl_ops smc_gen_nl_ops[] = { ...@@ -55,6 +56,11 @@ static const struct genl_ops smc_gen_nl_ops[] = {
/* can be retrieved by unprivileged users */ /* can be retrieved by unprivileged users */
.dumpit = smcr_nl_get_device, .dumpit = smcr_nl_get_device,
}, },
{
.cmd = SMC_NETLINK_GET_STATS,
/* can be retrieved by unprivileged users */
.dumpit = smc_nl_get_stats,
},
}; };
static const struct nla_policy smc_gen_nl_policy[2] = { static const struct nla_policy smc_gen_nl_policy[2] = {
......
...@@ -12,6 +12,10 @@ ...@@ -12,6 +12,10 @@
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/percpu.h> #include <linux/percpu.h>
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/smc.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include "smc_netlink.h"
#include "smc_stats.h" #include "smc_stats.h"
/* serialize fallback reason statistic gathering */ /* serialize fallback reason statistic gathering */
...@@ -33,3 +37,278 @@ void smc_stats_exit(void) ...@@ -33,3 +37,278 @@ void smc_stats_exit(void)
{ {
free_percpu(smc_stats); free_percpu(smc_stats);
} }
static int smc_nl_fill_stats_rmb_data(struct sk_buff *skb,
struct smc_stats *stats, int tech,
int type)
{
struct smc_stats_rmbcnt *stats_rmb_cnt;
struct nlattr *attrs;
if (type == SMC_NLA_STATS_T_TX_RMB_STATS)
stats_rmb_cnt = &stats->smc[tech].rmb_tx;
else
stats_rmb_cnt = &stats->smc[tech].rmb_rx;
attrs = nla_nest_start(skb, type);
if (!attrs)
goto errout;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_REUSE_CNT,
stats_rmb_cnt->reuse_cnt,
SMC_NLA_STATS_RMB_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_PEER_CNT,
stats_rmb_cnt->buf_size_small_peer_cnt,
SMC_NLA_STATS_RMB_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_SIZE_SM_CNT,
stats_rmb_cnt->buf_size_small_cnt,
SMC_NLA_STATS_RMB_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_PEER_CNT,
stats_rmb_cnt->buf_full_peer_cnt,
SMC_NLA_STATS_RMB_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_FULL_CNT,
stats_rmb_cnt->buf_full_cnt,
SMC_NLA_STATS_RMB_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_ALLOC_CNT,
stats_rmb_cnt->alloc_cnt,
SMC_NLA_STATS_RMB_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_RMB_DGRADE_CNT,
stats_rmb_cnt->dgrade_cnt,
SMC_NLA_STATS_RMB_PAD))
goto errattr;
nla_nest_end(skb, attrs);
return 0;
errattr:
nla_nest_cancel(skb, attrs);
errout:
return -EMSGSIZE;
}
static int smc_nl_fill_stats_bufsize_data(struct sk_buff *skb,
struct smc_stats *stats, int tech,
int type)
{
struct smc_stats_memsize *stats_pload;
struct nlattr *attrs;
if (type == SMC_NLA_STATS_T_TXPLOAD_SIZE)
stats_pload = &stats->smc[tech].tx_pd;
else if (type == SMC_NLA_STATS_T_RXPLOAD_SIZE)
stats_pload = &stats->smc[tech].rx_pd;
else if (type == SMC_NLA_STATS_T_TX_RMB_SIZE)
stats_pload = &stats->smc[tech].tx_rmbsize;
else if (type == SMC_NLA_STATS_T_RX_RMB_SIZE)
stats_pload = &stats->smc[tech].rx_rmbsize;
else
goto errout;
attrs = nla_nest_start(skb, type);
if (!attrs)
goto errout;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_8K,
stats_pload->buf[SMC_BUF_8K],
SMC_NLA_STATS_PLOAD_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_16K,
stats_pload->buf[SMC_BUF_16K],
SMC_NLA_STATS_PLOAD_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_32K,
stats_pload->buf[SMC_BUF_32K],
SMC_NLA_STATS_PLOAD_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_64K,
stats_pload->buf[SMC_BUF_64K],
SMC_NLA_STATS_PLOAD_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_128K,
stats_pload->buf[SMC_BUF_128K],
SMC_NLA_STATS_PLOAD_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_256K,
stats_pload->buf[SMC_BUF_256K],
SMC_NLA_STATS_PLOAD_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_512K,
stats_pload->buf[SMC_BUF_512K],
SMC_NLA_STATS_PLOAD_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_1024K,
stats_pload->buf[SMC_BUF_1024K],
SMC_NLA_STATS_PLOAD_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_PLOAD_G_1024K,
stats_pload->buf[SMC_BUF_G_1024K],
SMC_NLA_STATS_PLOAD_PAD))
goto errattr;
nla_nest_end(skb, attrs);
return 0;
errattr:
nla_nest_cancel(skb, attrs);
errout:
return -EMSGSIZE;
}
static int smc_nl_fill_stats_tech_data(struct sk_buff *skb,
struct smc_stats *stats, int tech)
{
struct smc_stats_tech *smc_tech;
struct nlattr *attrs;
smc_tech = &stats->smc[tech];
if (tech == SMC_TYPE_D)
attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCD_TECH);
else
attrs = nla_nest_start(skb, SMC_NLA_STATS_SMCR_TECH);
if (!attrs)
goto errout;
if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
SMC_NLA_STATS_T_TX_RMB_STATS))
goto errattr;
if (smc_nl_fill_stats_rmb_data(skb, stats, tech,
SMC_NLA_STATS_T_RX_RMB_STATS))
goto errattr;
if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
SMC_NLA_STATS_T_TXPLOAD_SIZE))
goto errattr;
if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
SMC_NLA_STATS_T_RXPLOAD_SIZE))
goto errattr;
if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
SMC_NLA_STATS_T_TX_RMB_SIZE))
goto errattr;
if (smc_nl_fill_stats_bufsize_data(skb, stats, tech,
SMC_NLA_STATS_T_RX_RMB_SIZE))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V1_SUCC,
smc_tech->clnt_v1_succ_cnt,
SMC_NLA_STATS_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CLNT_V2_SUCC,
smc_tech->clnt_v2_succ_cnt,
SMC_NLA_STATS_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V1_SUCC,
smc_tech->srv_v1_succ_cnt,
SMC_NLA_STATS_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SRV_V2_SUCC,
smc_tech->srv_v2_succ_cnt,
SMC_NLA_STATS_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_BYTES,
smc_tech->rx_bytes,
SMC_NLA_STATS_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_BYTES,
smc_tech->tx_bytes,
SMC_NLA_STATS_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_RX_CNT,
smc_tech->rx_cnt,
SMC_NLA_STATS_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_TX_CNT,
smc_tech->tx_cnt,
SMC_NLA_STATS_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SENDPAGE_CNT,
smc_tech->sendpage_cnt,
SMC_NLA_STATS_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CORK_CNT,
smc_tech->cork_cnt,
SMC_NLA_STATS_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_NDLY_CNT,
smc_tech->ndly_cnt,
SMC_NLA_STATS_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SPLICE_CNT,
smc_tech->splice_cnt,
SMC_NLA_STATS_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_URG_DATA_CNT,
smc_tech->urg_data_cnt,
SMC_NLA_STATS_PAD))
goto errattr;
nla_nest_end(skb, attrs);
return 0;
errattr:
nla_nest_cancel(skb, attrs);
errout:
return -EMSGSIZE;
}
int smc_nl_get_stats(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
struct smc_stats *stats;
struct nlattr *attrs;
int cpu, i, size;
void *nlh;
u64 *src;
u64 *sum;
if (cb_ctx->pos[0])
goto errmsg;
nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&smc_gen_nl_family, NLM_F_MULTI,
SMC_NETLINK_GET_STATS);
if (!nlh)
goto errmsg;
attrs = nla_nest_start(skb, SMC_GEN_STATS);
if (!attrs)
goto errnest;
stats = kzalloc(sizeof(*stats), GFP_KERNEL);
if (!stats)
goto erralloc;
size = sizeof(*stats) / sizeof(u64);
for_each_possible_cpu(cpu) {
src = (u64 *)per_cpu_ptr(smc_stats, cpu);
sum = (u64 *)stats;
for (i = 0; i < size; i++)
*(sum++) += *(src++);
}
if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_D))
goto errattr;
if (smc_nl_fill_stats_tech_data(skb, stats, SMC_TYPE_R))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_CLNT_HS_ERR_CNT,
stats->clnt_hshake_err_cnt,
SMC_NLA_STATS_PAD))
goto errattr;
if (nla_put_u64_64bit(skb, SMC_NLA_STATS_SRV_HS_ERR_CNT,
stats->srv_hshake_err_cnt,
SMC_NLA_STATS_PAD))
goto errattr;
nla_nest_end(skb, attrs);
genlmsg_end(skb, nlh);
cb_ctx->pos[0] = 1;
kfree(stats);
return skb->len;
errattr:
kfree(stats);
erralloc:
nla_nest_cancel(skb, attrs);
errnest:
genlmsg_cancel(skb, nlh);
errmsg:
return skb->len;
}
...@@ -247,6 +247,7 @@ do { \ ...@@ -247,6 +247,7 @@ do { \
} \ } \
while (0) while (0)
int smc_nl_get_stats(struct sk_buff *skb, struct netlink_callback *cb);
int smc_stats_init(void) __init; int smc_stats_init(void) __init;
void smc_stats_exit(void); void smc_stats_exit(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment