Commit 6e2345c1 authored by David S. Miller

Merge branch 'net-sched-expose-HW-stats-types-per-action-used-by-drivers'

Jiri Pirko says:

====================
net: sched: expose HW stats types per action used by drivers

The first patch just adds a helper that the second patch also uses.
The second patch exposes the HW stats types actually used by drivers.

Example:

$ tc filter add dev enp3s0np1 ingress proto ip handle 1 pref 1 flower dst_ip 192.168.1.1 action drop
$ tc -s filter show dev enp3s0np1 ingress
filter protocol ip pref 1 flower chain 0
filter protocol ip pref 1 flower chain 0 handle 0x1
  eth_type ipv4
  dst_ip 192.168.1.1
  in_hw in_hw_count 2
        action order 1: gact action drop
         random type none pass val 0
         index 1 ref 1 bind 1 installed 10 sec used 10 sec
        Action statistics:
        Sent 0 bytes 0 pkt (dropped 0, overlimits 0 requeues 0)
        backlog 0b 0p requeues 0
        used_hw_stats immediate     <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents acc086bf 93a129eb
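As a rough illustration of the driver-facing change, here is a minimal sketch of a driver stats callback using the extended flow_stats_update() (hypothetical code: foo_priv and foo_hw_read_counters are made-up names, not part of this series):

static int foo_flower_stats(struct foo_priv *priv, struct flow_cls_offload *f)
{
        u64 bytes, packets, lastused;

        /* Read the rule's counters from hardware (driver-specific). */
        foo_hw_read_counters(priv, f->cookie, &bytes, &packets, &lastused);

        /* New in this series: the extra argument tells the TC core which
         * kind of HW stats backs these counters (here, polled/delayed).
         */
        flow_stats_update(&f->stats, bytes, packets, lastused,
                          FLOW_ACTION_HW_STATS_DELAYED);
        return 0;
}
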
@@ -1639,7 +1639,7 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
         spin_unlock(&flow->stats_lock);
         flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets,
-                          lastused);
+                          lastused, FLOW_ACTION_HW_STATS_DELAYED);
         return 0;
 }

@@ -903,7 +903,8 @@ int cxgb4_tc_flower_stats(struct net_device *dev,
                 ofld_stats->last_used = jiffies;
                 flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
                                   packets - ofld_stats->packet_count,
-                                  ofld_stats->last_used);
+                                  ofld_stats->last_used,
+                                  FLOW_ACTION_HW_STATS_IMMEDIATE);
                 ofld_stats->packet_count = packets;
                 ofld_stats->byte_count = bytes;

@@ -346,7 +346,8 @@ int cxgb4_tc_matchall_stats(struct net_device *dev,
                 flow_stats_update(&cls_matchall->stats,
                                   bytes - tc_port_matchall->ingress.bytes,
                                   packets - tc_port_matchall->ingress.packets,
-                                  tc_port_matchall->ingress.last_used);
+                                  tc_port_matchall->ingress.last_used,
+                                  FLOW_ACTION_HW_STATS_IMMEDIATE);
                 tc_port_matchall->ingress.packets = packets;
                 tc_port_matchall->ingress.bytes = bytes;

@@ -666,7 +666,8 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
                 return -ENOENT;
         mlx5_fc_query_cached(entry->counter, &bytes, &packets, &lastuse);
-        flow_stats_update(&f->stats, bytes, packets, lastuse);
+        flow_stats_update(&f->stats, bytes, packets, lastuse,
+                          FLOW_ACTION_HW_STATS_DELAYED);
         return 0;
 }

@@ -4468,7 +4468,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 no_peer_counter:
         mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 out:
-        flow_stats_update(&f->stats, bytes, packets, lastuse);
+        flow_stats_update(&f->stats, bytes, packets, lastuse,
+                          FLOW_ACTION_HW_STATS_DELAYED);
         trace_mlx5e_stats_flower(f);
 errout:
         mlx5e_flow_put(priv, flow);

@@ -4585,7 +4586,8 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
         dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
         dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
         rpriv->prev_vf_vport_stats = cur_stats;
-        flow_stats_update(&ma->stats, dpkts, dbytes, jiffies);
+        flow_stats_update(&ma->stats, dpkts, dbytes, jiffies,
+                          FLOW_ACTION_HW_STATS_DELAYED);
 }
 static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,

@@ -786,7 +786,8 @@ struct mlxsw_sp_acl_rule_info *
 mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
 int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_rule *rule,
-                                u64 *packets, u64 *bytes, u64 *last_use);
+                                u64 *packets, u64 *bytes, u64 *last_use,
+                                enum flow_action_hw_stats *used_hw_stats);
 struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp);

@@ -967,7 +967,8 @@ static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
 int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_rule *rule,
-                                u64 *packets, u64 *bytes, u64 *last_use)
+                                u64 *packets, u64 *bytes, u64 *last_use,
+                                enum flow_action_hw_stats *used_hw_stats)
 {
         struct mlxsw_sp_acl_rule_info *rulei;

@@ -982,6 +983,7 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
                                                &current_bytes);
                 if (err)
                         return err;
+                *used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE;
         }
         *packets = current_packets - rule->last_packets;
         *bytes = current_bytes - rule->last_bytes;

@@ -571,6 +571,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
                           struct mlxsw_sp_acl_block *block,
                           struct flow_cls_offload *f)
 {
+        enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
         struct mlxsw_sp_acl_ruleset *ruleset;
         struct mlxsw_sp_acl_rule *rule;
         u64 packets;

@@ -589,11 +590,11 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
                 return -EINVAL;
         err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
-                                          &lastuse);
+                                          &lastuse, &used_hw_stats);
         if (err)
                 goto err_rule_get_stats;
-        flow_stats_update(&f->stats, bytes, packets, lastuse);
+        flow_stats_update(&f->stats, bytes, packets, lastuse, used_hw_stats);
         mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
         return 0;

@@ -224,7 +224,8 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
         if (ret)
                 return ret;
-        flow_stats_update(&f->stats, 0x0, ace.stats.pkts, 0x0);
+        flow_stats_update(&f->stats, 0x0, ace.stats.pkts, 0x0,
+                          FLOW_ACTION_HW_STATS_IMMEDIATE);
         return 0;
 }
 EXPORT_SYMBOL_GPL(ocelot_cls_flower_stats);

@@ -1490,7 +1490,8 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
         nfp_flower_update_merge_stats(app, nfp_flow);
         flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
-                          priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);
+                          priv->stats[ctx_id].pkts, priv->stats[ctx_id].used,
+                          FLOW_ACTION_HW_STATS_DELAYED);
         priv->stats[ctx_id].pkts = 0;
         priv->stats[ctx_id].bytes = 0;

@@ -320,7 +320,8 @@ nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
         spin_unlock_bh(&fl_priv->qos_stats_lock);
         flow_stats_update(&flow->stats, diff_bytes, diff_pkts,
-                          repr_priv->qos_table.last_update);
+                          repr_priv->qos_table.last_update,
+                          FLOW_ACTION_HW_STATS_DELAYED);
         return 0;
 }

@@ -42,6 +42,8 @@ struct tc_action {
         struct tcf_chain __rcu  *goto_chain;
         u32                     tcfa_flags;
         u8                      hw_stats;
+        u8                      used_hw_stats;
+        bool                    used_hw_stats_valid;
 };
 #define tcf_index       common.tcfa_index
 #define tcf_refcnt      common.tcfa_refcnt

@@ -370,14 +370,24 @@ struct flow_stats {
         u64     pkts;
         u64     bytes;
         u64     lastused;
+        enum flow_action_hw_stats used_hw_stats;
+        bool used_hw_stats_valid;
 };
 static inline void flow_stats_update(struct flow_stats *flow_stats,
-                                     u64 bytes, u64 pkts, u64 lastused)
+                                     u64 bytes, u64 pkts, u64 lastused,
+                                     enum flow_action_hw_stats used_hw_stats)
 {
         flow_stats->pkts        += pkts;
         flow_stats->bytes       += bytes;
         flow_stats->lastused    = max_t(u64, flow_stats->lastused, lastused);
+
+        /* The driver should pass value with a maximum of one bit set.
+         * Passing FLOW_ACTION_HW_STATS_ANY is invalid.
+         */
+        WARN_ON(used_hw_stats == FLOW_ACTION_HW_STATS_ANY);
+        flow_stats->used_hw_stats |= used_hw_stats;
+        flow_stats->used_hw_stats_valid = true;
 }
 enum flow_block_command {

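Note the OR-accumulation in flow_stats_update() above: when the same rule is offloaded to several blocks, each driver call contributes its own type bit. A minimal sketch of that aggregation (values illustrative only):

        struct flow_stats st = {};

        /* One offload block backs the rule with delayed (polled) counters ... */
        flow_stats_update(&st, 100, 1, jiffies, FLOW_ACTION_HW_STATS_DELAYED);
        /* ... another one backs it with immediate counters. */
        flow_stats_update(&st, 200, 2, jiffies, FLOW_ACTION_HW_STATS_IMMEDIATE);

        /* st.used_hw_stats now has both the DELAYED and IMMEDIATE bits set,
         * st.used_hw_stats_valid is true, and bytes/packets are summed.
         */
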
@@ -1465,6 +1465,21 @@ static inline int nla_put_in6_addr(struct sk_buff *skb, int attrtype,
         return nla_put(skb, attrtype, sizeof(*addr), addr);
 }
+/**
+ * nla_put_bitfield32 - Add a bitfield32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: value carrying bits
+ * @selector: selector of valid bits
+ */
+static inline int nla_put_bitfield32(struct sk_buff *skb, int attrtype,
+                                     __u32 value, __u32 selector)
+{
+        struct nla_bitfield32 tmp = { value, selector, };
+
+        return nla_put(skb, attrtype, sizeof(tmp), &tmp);
+}
 /**
  * nla_get_u32 - return payload of u32 attribute
  * @nla: u32 netlink attribute

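The new helper replaces the open-coded nla_put() of a struct nla_bitfield32; its call sites in act_api.c and sch_red.c below follow this pattern (a short usage sketch mirroring the hw_stats dump further down):

        /* 'value' carries the bits, 'selector' marks which bits are valid. */
        if (nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
                               a->hw_stats, TCA_ACT_HW_STATS_ANY))
                goto nla_put_failure;
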
@@ -262,7 +262,8 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
 static inline void
 tcf_exts_stats_update(const struct tcf_exts *exts,
-                      u64 bytes, u64 packets, u64 lastuse)
+                      u64 bytes, u64 packets, u64 lastuse,
+                      u8 used_hw_stats, bool used_hw_stats_valid)
 {
 #ifdef CONFIG_NET_CLS_ACT
         int i;

@@ -273,6 +274,8 @@ tcf_exts_stats_update(const struct tcf_exts *exts,
                 struct tc_action *a = exts->actions[i];
                 tcf_action_stats_update(a, bytes, packets, lastuse, true);
+                a->used_hw_stats = used_hw_stats;
+                a->used_hw_stats_valid = used_hw_stats_valid;
         }
         preempt_enable();

@@ -18,6 +18,7 @@ enum {
         TCA_ACT_COOKIE,
         TCA_ACT_FLAGS,
         TCA_ACT_HW_STATS,
+        TCA_ACT_USED_HW_STATS,
         __TCA_ACT_MAX
 };

@@ -789,23 +789,20 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
         }
         rcu_read_unlock();
-        if (a->hw_stats != TCA_ACT_HW_STATS_ANY) {
-                struct nla_bitfield32 hw_stats = {
-                        a->hw_stats,
-                        TCA_ACT_HW_STATS_ANY,
-                };
-
-                if (nla_put(skb, TCA_ACT_HW_STATS, sizeof(hw_stats), &hw_stats))
-                        goto nla_put_failure;
-        }
+        if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
+            nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
+                               a->hw_stats, TCA_ACT_HW_STATS_ANY))
+                goto nla_put_failure;
-        if (a->tcfa_flags) {
-                struct nla_bitfield32 flags = { a->tcfa_flags,
-                                                a->tcfa_flags, };
+        if (a->used_hw_stats_valid &&
+            nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
+                               a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
+                goto nla_put_failure;
-                if (nla_put(skb, TCA_ACT_FLAGS, sizeof(flags), &flags))
-                        goto nla_put_failure;
-        }
+        if (a->tcfa_flags &&
+            nla_put_bitfield32(skb, TCA_ACT_FLAGS,
+                               a->tcfa_flags, a->tcfa_flags))
+                goto nla_put_failure;
         nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
         if (nest == NULL)

@@ -492,7 +492,9 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
         tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
                               cls_flower.stats.pkts,
-                              cls_flower.stats.lastused);
+                              cls_flower.stats.lastused,
+                              cls_flower.stats.used_hw_stats,
+                              cls_flower.stats.used_hw_stats_valid);
 }
 static void __fl_put(struct cls_fl_filter *f)

@@ -338,7 +338,9 @@ static void mall_stats_hw_filter(struct tcf_proto *tp,
         tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);
         tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
-                              cls_mall.stats.pkts, cls_mall.stats.lastused);
+                              cls_mall.stats.pkts, cls_mall.stats.lastused,
+                              cls_mall.stats.used_hw_stats,
+                              cls_mall.stats.used_hw_stats_valid);
 }
 static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,

@@ -349,10 +349,6 @@ static int red_dump_offload_stats(struct Qdisc *sch)
 static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
         struct red_sched_data *q = qdisc_priv(sch);
-        struct nla_bitfield32 flags_bf = {
-                .selector = red_supported_flags,
-                .value = q->flags,
-        };
         struct nlattr *opts = NULL;
         struct tc_red_qopt opt = {
                 .limit          = q->limit,

@@ -375,7 +371,8 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
                 goto nla_put_failure;
         if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
             nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
-            nla_put(skb, TCA_RED_FLAGS, sizeof(flags_bf), &flags_bf))
+            nla_put_bitfield32(skb, TCA_RED_FLAGS,
+                               q->flags, red_supported_flags))
                 goto nla_put_failure;
         return nla_nest_end(skb, opts);