Commit a46e3d5e authored by Jakub Kicinski

Merge branch 'nfp-flow-independent-tc-action-hardware-offload'

Simon Horman says:

====================
nfp: flow-independent tc action hardware offload

Baowen Zheng says:

Allow the nfp NIC to offload tc actions independently of flows.

The motivation for this work is to let the nfp driver provide hardware
offload of the OVS metering feature, which calls for policers that may be
used by multiple flows and whose lifecycle is independent of any flows
that use them.

When the nfp driver is asked to offload a flow that uses such an
independent action, it first checks whether the action has already been
offloaded to the hardware. If it has not, the flow offload fails.
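
In outline, the driver-side check looks like this (a condensed sketch of
nfp_flower_meter_action() from this series; the wrapper name and the error
message text are illustrative):

 static int nfp_fl_check_indep_police(struct nfp_app *app,
				      const struct flow_action_entry *act,
				      struct netlink_ext_ack *extack)
 {
	/* The policer is identified by its tc hw_index; reject the flow
	 * offload if it has not already been offloaded on its own.
	 */
	if (!nfp_flower_search_meter_entry(app, act->hw_index)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "police action must be offloaded before the flow");
		return -EOPNOTSUPP;
	}

	return 0;
 }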

Once the nfp NIC successfully offloads an action, the user can check
in_hw_count when dumping the tc action.

tc CLI commands to offload and dump an action:

 # tc actions add action police rate 100mbit burst 10000k index 200 skip_sw

 # tc -s -d actions list action police

 total acts 1

      action order 0:  police 0xc8 rate 100Mbit burst 10000Kb mtu 2Kb action reclassify
      overhead 0b linklayer ethernet
      ref 1 bind 0  installed 142 sec used 0 sec
      Action statistics:
      Sent 0 bytes 0 pkt (dropped 0, overlimits 0 requeues 0)
      backlog 0b 0p requeues 0
      skip_sw in_hw in_hw_count 1
      used_hw_stats delayed
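
A flower rule can then reuse the offloaded policer by referencing its
index, for example (a sketch; device name and match are illustrative):

 # tc qdisc add dev eth0 clsact
 # tc filter add dev eth0 ingress protocol ip flower skip_sw ip_proto tcp action police index 200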
====================

Link: https://lore.kernel.org/r/20220223162302.97609-1-simon.horman@corigine.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 7bbb765b 5e98743c
@@ -922,6 +922,51 @@ nfp_fl_pedit(const struct flow_action_entry *act,
	}
}

static struct nfp_fl_meter *nfp_fl_meter(char *act_data)
{
	size_t act_size = sizeof(struct nfp_fl_meter);
	struct nfp_fl_meter *meter_act;

	meter_act = (struct nfp_fl_meter *)act_data;

	memset(meter_act, 0, act_size);

	meter_act->head.jump_id = NFP_FL_ACTION_OPCODE_METER;
	meter_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return meter_act;
}

static int
nfp_flower_meter_action(struct nfp_app *app,
			const struct flow_action_entry *action,
			struct nfp_fl_payload *nfp_fl, int *a_len,
			struct net_device *netdev,
			struct netlink_ext_ack *extack)
{
	struct nfp_fl_meter *fl_meter;
	u32 meter_id;

	if (*a_len + sizeof(struct nfp_fl_meter) > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: meter action size beyond the allowed maximum");
		return -EOPNOTSUPP;
	}

	meter_id = action->hw_index;
	if (!nfp_flower_search_meter_entry(app, meter_id)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "cannot offload flow table with unsupported police action");
		return -EOPNOTSUPP;
	}

	fl_meter = nfp_fl_meter(&nfp_fl->action_data[*a_len]);
	*a_len += sizeof(struct nfp_fl_meter);
	fl_meter->meter_id = cpu_to_be32(meter_id);

	return 0;
}

static int
nfp_flower_output_action(struct nfp_app *app,
			 const struct flow_action_entry *act,
@@ -985,6 +1030,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
		       struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
		       struct netlink_ext_ack *extack, int act_idx)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_set_tun *set_tun;
	struct nfp_fl_push_vlan *psh_v;
@@ -1149,6 +1195,18 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
		*pkt_host = true;
		break;
	case FLOW_ACTION_POLICE:
		if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_QOS_METER)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "unsupported offload: unsupported police action in action list");
			return -EOPNOTSUPP;
		}

		err = nfp_flower_meter_action(app, act, nfp_fl, a_len, netdev,
					      extack);
		if (err)
			return err;
		break;
	default:
		/* Currently we do not handle any other actions. */
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
...
@@ -85,6 +85,7 @@
#define NFP_FL_ACTION_OPCODE_SET_TCP		15
#define NFP_FL_ACTION_OPCODE_PRE_LAG		16
#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL		17
#define NFP_FL_ACTION_OPCODE_METER		24
#define NFP_FL_ACTION_OPCODE_PUSH_GENEVE	26
#define NFP_FL_ACTION_OPCODE_NUM		32
@@ -260,6 +261,12 @@ struct nfp_fl_set_mpls {
	__be32 lse;
};

struct nfp_fl_meter {
	struct nfp_fl_act_head head;
	__be16 reserved;
	__be32 meter_id;
};

/* Metadata with L2 (1W/4B)
 * ----------------------------------------------------------------
 *    3                   2                   1
...
@@ -12,7 +12,9 @@
#include <linux/rhashtable.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <linux/workqueue.h>
#include <linux/idr.h>
@@ -48,6 +50,7 @@ struct nfp_app;
#define NFP_FL_FEATS_IPV6_TUN		BIT(7)
#define NFP_FL_FEATS_VLAN_QINQ		BIT(8)
#define NFP_FL_FEATS_QOS_PPS		BIT(9)
#define NFP_FL_FEATS_QOS_METER		BIT(10)
#define NFP_FL_FEATS_HOST_ACK		BIT(31)

#define NFP_FL_ENABLE_FLOW_MERGE	BIT(0)
@@ -63,7 +66,8 @@ struct nfp_app;
			NFP_FL_FEATS_PRE_TUN_RULES | \
			NFP_FL_FEATS_IPV6_TUN | \
			NFP_FL_FEATS_VLAN_QINQ | \
-			NFP_FL_FEATS_QOS_PPS)
+			NFP_FL_FEATS_QOS_PPS | \
+			NFP_FL_FEATS_QOS_METER)

struct nfp_fl_mask_id {
	struct circ_buf mask_id_free_list;
@@ -191,6 +195,8 @@ struct nfp_fl_internal_ports {
 * @qos_stats_work:	Workqueue for qos stats processing
 * @qos_rate_limiters:	Current active qos rate limiters
 * @qos_stats_lock:	Lock on qos stats updates
 * @meter_stats_lock:	Lock on meter stats updates
 * @meter_table:	Hash table used to store the meter table
 * @pre_tun_rule_cnt:	Number of pre-tunnel rules offloaded
 * @merge_table:	Hash table to store merged flows
 * @ct_zone_table:	Hash table used to store the different zones
@@ -228,6 +234,8 @@ struct nfp_flower_priv {
	struct delayed_work qos_stats_work;
	unsigned int qos_rate_limiters;
	spinlock_t qos_stats_lock; /* Protect the qos stats */
	struct mutex meter_stats_lock; /* Protect the meter stats */
	struct rhashtable meter_table;
	int pre_tun_rule_cnt;
	struct rhashtable merge_table;
	struct rhashtable ct_zone_table;
@@ -374,6 +382,31 @@ struct nfp_fl_stats_frame {
	__be64 stats_cookie;
};

struct nfp_meter_stats_entry {
	u64 pkts;
	u64 bytes;
	u64 drops;
};

struct nfp_meter_entry {
	struct rhash_head ht_node;
	u32 meter_id;
	bool bps;
	u32 rate;
	u32 burst;
	u64 used;
	struct nfp_meter_stats {
		u64 update;
		struct nfp_meter_stats_entry curr;
		struct nfp_meter_stats_entry prev;
	} stats;
};

enum nfp_meter_op {
	NFP_METER_ADD,
	NFP_METER_DEL,
};

static inline bool
nfp_flower_internal_port_can_offload(struct nfp_app *app,
				     struct net_device *netdev)
@@ -569,4 +602,18 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
void
nfp_flower_update_merge_stats(struct nfp_app *app,
			      struct nfp_fl_payload *sub_flow);

int nfp_setup_tc_act_offload(struct nfp_app *app,
			     struct flow_offload_action *fl_act);
int nfp_init_meter_table(struct nfp_app *app);
void nfp_flower_stats_meter_request_all(struct nfp_flower_priv *fl_priv);
void nfp_act_stats_reply(struct nfp_app *app, void *pmsg);
int nfp_flower_offload_one_police(struct nfp_app *app, bool ingress,
				  bool pps, u32 id, u32 rate, u32 burst);
int nfp_flower_setup_meter_entry(struct nfp_app *app,
				 const struct flow_action_entry *action,
				 enum nfp_meter_op op,
				 u32 meter_id);
struct nfp_meter_entry *
nfp_flower_search_meter_entry(struct nfp_app *app, u32 meter_id);

#endif
@@ -1861,6 +1861,20 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str
	return 0;
}

static int
nfp_setup_tc_no_dev(struct nfp_app *app, enum tc_setup_type type, void *data)
{
	if (!data)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_ACT:
		return nfp_setup_tc_act_offload(app, data);
	default:
		return -EOPNOTSUPP;
	}
}

int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
			    enum tc_setup_type type, void *type_data,
@@ -1868,7 +1882,7 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *
			    void (*cleanup)(struct flow_block_cb *block_cb))
{
	if (!netdev)
-		return -EOPNOTSUPP;
+		return nfp_setup_tc_no_dev(cb_priv, type, data);

	if (!nfp_fl_is_netdev_to_offload(netdev))
		return -EOPNOTSUPP;
...