Commit 7e6a95d3 authored by David S. Miller

Merge branch 'net-act_police-offload-support'

Jakub Kicinski says:

===================
net: act_police offload support

This set starts by converting cls_matchall to the new flow offload
infrastructure. It so happens that all drivers implementing cls_matchall
offload today also offload cls_flower, so it's a little easier for
them to handle the actions in the unified flow_rule format, even though
in cls_matchall there is no flow to speak of. If a driver ever appears
which would prefer the old, direct access to TC exts, we can add the
pointer in the offload structure back and support both.

Next, act_police is added to the actions supported by the flow offload API.

NFP support for act_police offload is added as the final step.  The flower
firmware is configured to perform TX rate limiting in a way which matches
act_police's behaviour.  It does not use DMA.IN back pressure; instead
it drops packets after they have already been DMAed into the NIC.
In other words, it uses our standard traffic policing implementation;
future patches will extend it to other ports and traffic directions.
===================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c8f8207c 5fb5c395
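
For a driver-side view of what the converted interface looks like: after this
series, a cls_matchall offload hands the driver a flow_rule whose single
action can be inspected directly. A minimal sketch, not taken from the series
itself; the foo_* callback and the hw_install_policer() helper are hypothetical:

	static int foo_setup_matchall(struct foo_port *port,
				      struct tc_cls_matchall_offload *f)
	{
		struct flow_action_entry *act;

		if (!flow_offload_has_one_action(&f->rule->action))
			return -EOPNOTSUPP;

		act = &f->rule->action.entries[0];
		if (act->id != FLOW_ACTION_POLICE)
			return -EOPNOTSUPP;

		/* police.rate_bytes_ps is bytes/s, police.burst a duration */
		return hw_install_policer(port, act->police.rate_bytes_ps,
					  act->police.burst);
	}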
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1269,21 +1269,19 @@ mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
 static int
 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
 				      struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
-				      const struct tc_action *a,
+				      const struct flow_action_entry *act,
 				      bool ingress)
 {
 	enum mlxsw_sp_span_type span_type;
-	struct net_device *to_dev;
 
-	to_dev = tcf_mirred_dev(a);
-	if (!to_dev) {
+	if (!act->dev) {
 		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
 		return -EINVAL;
 	}
 
 	mirror->ingress = ingress;
 	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
-	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
+	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
 					true, &mirror->span_id);
 }
@@ -1302,7 +1300,7 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
 static int
 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
 				      struct tc_cls_matchall_offload *cls,
-				      const struct tc_action *a,
+				      const struct flow_action_entry *act,
 				      bool ingress)
 {
 	int err;
@@ -1313,18 +1311,18 @@ mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
 		netdev_err(mlxsw_sp_port->dev, "sample already active\n");
 		return -EEXIST;
 	}
-	if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
+	if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
 		netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
 		return -EOPNOTSUPP;
 	}
 
 	rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
-			   tcf_sample_psample_group(a));
-	mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
-	mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
-	mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
+			   act->sample.psample_group);
+	mlxsw_sp_port->sample->truncate = act->sample.truncate;
+	mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
+	mlxsw_sp_port->sample->rate = act->sample.rate;
 
-	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
+	err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
 	if (err)
 		goto err_port_sample_set;
 	return 0;
@@ -1350,10 +1348,10 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 {
 	struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
 	__be16 protocol = f->common.protocol;
-	const struct tc_action *a;
+	struct flow_action_entry *act;
 	int err;
 
-	if (!tcf_exts_has_one_action(f->exts)) {
+	if (!flow_offload_has_one_action(&f->rule->action)) {
 		netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
 		return -EOPNOTSUPP;
 	}
@@ -1363,19 +1361,21 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 		return -ENOMEM;
 	mall_tc_entry->cookie = f->cookie;
 
-	a = tcf_exts_first_action(f->exts);
+	act = &f->rule->action.entries[0];
 
-	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
+	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
 		struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
 
 		mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
 		mirror = &mall_tc_entry->mirror;
 		err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
-							    mirror, a, ingress);
-	} else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
+							    mirror, act,
+							    ingress);
+	} else if (act->id == FLOW_ACTION_SAMPLE &&
+		   protocol == htons(ETH_P_ALL)) {
 		mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
 		err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
-							    a, ingress);
+							    act, ingress);
 	} else {
 		err = -EOPNOTSUPP;
 	}
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -43,7 +43,8 @@ nfp-objs += \
 	    flower/match.o \
 	    flower/metadata.o \
 	    flower/offload.o \
-	    flower/tunnel_conf.o
+	    flower/tunnel_conf.o \
+	    flower/qos_conf.o
 endif
 
 ifeq ($(CONFIG_BPF_SYSCALL),y)
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -278,6 +278,9 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
 	case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
 		nfp_tunnel_keep_alive(app, skb);
 		break;
+	case NFP_FLOWER_CMSG_TYPE_QOS_STATS:
+		nfp_flower_stats_rlim_reply(app, skb);
+		break;
 	case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
 		if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
 			skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -416,6 +416,9 @@ enum nfp_flower_cmsg_type_port {
 	NFP_FLOWER_CMSG_TYPE_TUN_IPS =		14,
 	NFP_FLOWER_CMSG_TYPE_FLOW_STATS =	15,
 	NFP_FLOWER_CMSG_TYPE_PORT_ECHO =	16,
+	NFP_FLOWER_CMSG_TYPE_QOS_MOD =		18,
+	NFP_FLOWER_CMSG_TYPE_QOS_DEL =		19,
+	NFP_FLOWER_CMSG_TYPE_QOS_STATS =	20,
 	NFP_FLOWER_CMSG_TYPE_MAX =		32,
 };
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -776,6 +776,9 @@ static int nfp_flower_init(struct nfp_app *app)
 			nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
 	}
 
+	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
+		nfp_flower_qos_init(app);
+
 	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
 	INIT_LIST_HEAD(&app_priv->non_repr_priv);
 
@@ -799,6 +802,9 @@ static void nfp_flower_clean(struct nfp_app *app)
 	skb_queue_purge(&app_priv->cmsg_skbs_low);
 	flush_work(&app_priv->cmsg_work);
 
+	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
+		nfp_flower_qos_cleanup(app);
+
 	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
 		nfp_flower_lag_cleanup(&app_priv->nfp_lag);
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -5,6 +5,7 @@
 #define __NFP_FLOWER_H__ 1
 
 #include "cmsg.h"
+#include "../nfp_net.h"
 
 #include <linux/circ_buf.h>
 #include <linux/hashtable.h>
@@ -39,6 +40,7 @@ struct nfp_app;
 #define NFP_FL_NBI_MTU_SETTING		BIT(1)
 #define NFP_FL_FEATS_GENEVE_OPT		BIT(2)
 #define NFP_FL_FEATS_VLAN_PCP		BIT(3)
+#define NFP_FL_FEATS_VF_RLIM		BIT(4)
 #define NFP_FL_FEATS_FLOW_MOD		BIT(5)
 #define NFP_FL_FEATS_FLOW_MERGE		BIT(30)
 #define NFP_FL_FEATS_LAG		BIT(31)
@@ -157,6 +159,9 @@ struct nfp_fl_internal_ports {
  * @active_mem_unit:	Current active memory unit for flower rules
  * @total_mem_units:	Total number of available memory units for flower rules
  * @internal_ports:	Internal port ids used in offloaded rules
+ * @qos_stats_work:	Workqueue for qos stats processing
+ * @qos_rate_limiters:	Current active qos rate limiters
+ * @qos_stats_lock:	Lock on qos stats updates
  */
 struct nfp_flower_priv {
 	struct nfp_app *app;
@@ -185,6 +190,23 @@ struct nfp_flower_priv {
 	unsigned int active_mem_unit;
 	unsigned int total_mem_units;
 	struct nfp_fl_internal_ports internal_ports;
+	struct delayed_work qos_stats_work;
+	unsigned int qos_rate_limiters;
+	spinlock_t qos_stats_lock; /* Protect the qos stats */
+};
+
+/**
+ * struct nfp_fl_qos - Flower APP priv data for quality of service
+ * @netdev_port_id:	NFP port number of repr with qos info
+ * @curr_stats:		Currently stored stats updates for qos info
+ * @prev_stats:		Previously stored updates for qos info
+ * @last_update:	Stored time when last stats were updated
+ */
+struct nfp_fl_qos {
+	u32 netdev_port_id;
+	struct nfp_stat_pair curr_stats;
+	struct nfp_stat_pair prev_stats;
+	u64 last_update;
 };
 
 /**
@@ -194,6 +216,7 @@ struct nfp_flower_priv {
  * @mac_offloaded:	Flag indicating a MAC address is offloaded for repr
 * @offloaded_mac_addr:	MAC address that has been offloaded for repr
 * @mac_list:		List entry of reprs that share the same offloaded MAC
+ * @qos_table:		Stored info on filters implementing qos
 */
 struct nfp_flower_repr_priv {
 	struct nfp_repr *nfp_repr;
@@ -201,6 +224,7 @@ struct nfp_flower_repr_priv {
 	bool mac_offloaded;
 	u8 offloaded_mac_addr[ETH_ALEN];
 	struct list_head mac_list;
+	struct nfp_fl_qos qos_table;
 };
 
 /**
@@ -366,6 +390,11 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
 				       struct nfp_fl_pre_lag *pre_act);
 int nfp_flower_lag_get_output_id(struct nfp_app *app,
 				 struct net_device *master);
+void nfp_flower_qos_init(struct nfp_app *app);
+void nfp_flower_qos_cleanup(struct nfp_app *app);
+int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
+				 struct tc_cls_matchall_offload *flow);
+void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
 int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
 				       struct net_device *netdev,
 				       unsigned long event);
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1185,6 +1185,9 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
 	case TC_SETUP_CLSFLOWER:
 		return nfp_flower_repr_offload(repr->app, repr->netdev,
 					       type_data);
+	case TC_SETUP_CLSMATCHALL:
+		return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
+						    type_data);
 	default:
 		return -EOPNOTSUPP;
 	}
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/math64.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_port.h"

#define NFP_FL_QOS_UPDATE		msecs_to_jiffies(1000)

struct nfp_police_cfg_head {
	__be32 flags_opts;
	__be32 port;
};

/* Police cmsg for configuring a trTCM traffic conditioner (8W/32B)
 * See RFC 2698 for more details.
 * ----------------------------------------------------------------
 *    3                   2                   1
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                          Flag options                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                          Port Ingress                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                        Token Bucket Peak                      |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Token Bucket Committed                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                         Peak Burst Size                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Committed Burst Size                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Peak Information Rate                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                  Committed Information Rate                   |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
struct nfp_police_config {
	struct nfp_police_cfg_head head;
	__be32 bkt_tkn_p;
	__be32 bkt_tkn_c;
	__be32 pbs;
	__be32 cbs;
	__be32 pir;
	__be32 cir;
};

struct nfp_police_stats_reply {
	struct nfp_police_cfg_head head;
	__be64 pass_bytes;
	__be64 pass_pkts;
	__be64 drop_bytes;
	__be64 drop_pkts;
};

static int
nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
				struct tc_cls_matchall_offload *flow,
				struct netlink_ext_ack *extack)
{
	struct flow_action_entry *action = &flow->rule->action.entries[0];
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_config *config;
	struct nfp_repr *repr;
	struct sk_buff *skb;
	u32 netdev_port_id;
	u64 burst, rate;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	if (tcf_block_shared(flow->common.block)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
		return -EOPNOTSUPP;
	}

	if (repr->port->type != NFP_PORT_VF_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
		return -EOPNOTSUPP;
	}

	if (!flow_offload_has_one_action(&flow->rule->action)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires a single action");
		return -EOPNOTSUPP;
	}

	if (flow->common.prio != (1 << 16)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
		return -EOPNOTSUPP;
	}

	if (action->id != FLOW_ACTION_POLICE) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires police action");
		return -EOPNOTSUPP;
	}

	rate = action->police.rate_bytes_ps;
	burst = div_u64(rate * PSCHED_NS2TICKS(action->police.burst),
			PSCHED_TICKS_PER_SEC);
	netdev_port_id = nfp_repr_get_port_id(netdev);

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	config->head.port = cpu_to_be32(netdev_port_id);
	config->bkt_tkn_p = cpu_to_be32(burst);
	config->bkt_tkn_c = cpu_to_be32(burst);
	config->pbs = cpu_to_be32(burst);
	config->cbs = cpu_to_be32(burst);
	config->pir = cpu_to_be32(rate);
	config->cir = cpu_to_be32(rate);
	nfp_ctrl_tx(repr->app->ctrl, skb);

	repr_priv = repr->app_priv;
	repr_priv->qos_table.netdev_port_id = netdev_port_id;
	fl_priv->qos_rate_limiters++;
	if (fl_priv->qos_rate_limiters == 1)
		schedule_delayed_work(&fl_priv->qos_stats_work,
				      NFP_FL_QOS_UPDATE);

	return 0;
}
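To make the burst conversion above concrete: act_police hands the driver a
burst that is a time (it stores its bucket depth in nanoseconds), while the
firmware wants a byte count, so the driver scales the duration by the rate. A
worked fragment with hypothetical numbers, using the kernel's psched macros:

	/* 100 Mbit/s policer (12,500,000 bytes/s) with a 1 ms burst window */
	u64 rate = 12500000;
	s64 burst_ns = 1000000;	/* what action->police.burst would carry */
	u64 burst_bytes = div_u64(rate * PSCHED_NS2TICKS(burst_ns),
				  PSCHED_TICKS_PER_SEC);
	/* PSCHED_NS2TICKS()/PSCHED_TICKS_PER_SEC reduces to seconds, so
	 * burst_bytes = 12,500,000 * 0.001 = 12,500 bytes.
	 */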
static int
nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
			       struct tc_cls_matchall_offload *flow,
			       struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_config *config;
	struct nfp_repr *repr;
	struct sk_buff *skb;
	u32 netdev_port_id;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	netdev_port_id = nfp_repr_get_port_id(netdev);
	repr_priv = repr->app_priv;

	if (!repr_priv->qos_table.netdev_port_id) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
		return -EOPNOTSUPP;
	}

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
				    NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Clear all qos associated data for this interface */
	memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
	fl_priv->qos_rate_limiters--;
	if (!fl_priv->qos_rate_limiters)
		cancel_delayed_work_sync(&fl_priv->qos_stats_work);

	config = nfp_flower_cmsg_get_data(skb);
	memset(config, 0, sizeof(struct nfp_police_config));
	config->head.port = cpu_to_be32(netdev_port_id);
	nfp_ctrl_tx(repr->app->ctrl, skb);

	return 0;
}

void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_police_stats_reply *msg;
	struct nfp_stat_pair *curr_stats;
	struct nfp_stat_pair *prev_stats;
	struct net_device *netdev;
	struct nfp_repr *repr;
	u32 netdev_port_id;

	msg = nfp_flower_cmsg_get_data(skb);
	netdev_port_id = be32_to_cpu(msg->head.port);
	rcu_read_lock();
	netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
	if (!netdev)
		goto exit_unlock_rcu;

	repr = netdev_priv(netdev);
	repr_priv = repr->app_priv;
	curr_stats = &repr_priv->qos_table.curr_stats;
	prev_stats = &repr_priv->qos_table.prev_stats;

	spin_lock_bh(&fl_priv->qos_stats_lock);
	curr_stats->pkts = be64_to_cpu(msg->pass_pkts) +
			   be64_to_cpu(msg->drop_pkts);
	curr_stats->bytes = be64_to_cpu(msg->pass_bytes) +
			    be64_to_cpu(msg->drop_bytes);

	/* First reply for this policer: baseline prev_stats on the current
	 * counters so the first delta reported to TC is zero.
	 */
	if (!repr_priv->qos_table.last_update) {
		prev_stats->pkts = curr_stats->pkts;
		prev_stats->bytes = curr_stats->bytes;
	}

	repr_priv->qos_table.last_update = jiffies;
	spin_unlock_bh(&fl_priv->qos_stats_lock);

exit_unlock_rcu:
	rcu_read_unlock();
}

static void
nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
			      u32 netdev_port_id)
{
	struct nfp_police_cfg_head *head;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(fl_priv->app,
				    sizeof(struct nfp_police_cfg_head),
				    NFP_FLOWER_CMSG_TYPE_QOS_STATS,
				    GFP_ATOMIC);
	if (!skb)
		return;

	head = nfp_flower_cmsg_get_data(skb);
	memset(head, 0, sizeof(struct nfp_police_cfg_head));
	head->port = cpu_to_be32(netdev_port_id);

	nfp_ctrl_tx(fl_priv->app->ctrl, skb);
}

static void
nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
{
	struct nfp_reprs *repr_set;
	int i;

	rcu_read_lock();
	repr_set = rcu_dereference(fl_priv->app->reprs[NFP_REPR_TYPE_VF]);
	if (!repr_set)
		goto exit_unlock_rcu;

	for (i = 0; i < repr_set->num_reprs; i++) {
		struct net_device *netdev;

		netdev = rcu_dereference(repr_set->reprs[i]);
		if (netdev) {
			struct nfp_repr *priv = netdev_priv(netdev);
			struct nfp_flower_repr_priv *repr_priv;
			u32 netdev_port_id;

			repr_priv = priv->app_priv;
			netdev_port_id = repr_priv->qos_table.netdev_port_id;
			if (!netdev_port_id)
				continue;

			nfp_flower_stats_rlim_request(fl_priv, netdev_port_id);
		}
	}

exit_unlock_rcu:
	rcu_read_unlock();
}

static void update_stats_cache(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct nfp_flower_priv *fl_priv;

	delayed_work = to_delayed_work(work);
	fl_priv = container_of(delayed_work, struct nfp_flower_priv,
			       qos_stats_work);

	nfp_flower_stats_rlim_request_all(fl_priv);
	schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
}

static int
nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
			      struct tc_cls_matchall_offload *flow,
			      struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *fl_priv = app->priv;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_stat_pair *curr_stats;
	struct nfp_stat_pair *prev_stats;
	u64 diff_bytes, diff_pkts;
	struct nfp_repr *repr;

	if (!nfp_netdev_is_nfp_repr(netdev)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
		return -EOPNOTSUPP;
	}
	repr = netdev_priv(netdev);

	repr_priv = repr->app_priv;
	if (!repr_priv->qos_table.netdev_port_id) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot find qos entry for stats update");
		return -EOPNOTSUPP;
	}

	/* Report the delta against the last TC query, then advance the
	 * baseline.
	 */
	spin_lock_bh(&fl_priv->qos_stats_lock);
	curr_stats = &repr_priv->qos_table.curr_stats;
	prev_stats = &repr_priv->qos_table.prev_stats;
	diff_pkts = curr_stats->pkts - prev_stats->pkts;
	diff_bytes = curr_stats->bytes - prev_stats->bytes;
	prev_stats->pkts = curr_stats->pkts;
	prev_stats->bytes = curr_stats->bytes;
	spin_unlock_bh(&fl_priv->qos_stats_lock);

	flow_stats_update(&flow->stats, diff_bytes, diff_pkts,
			  repr_priv->qos_table.last_update);
	return 0;
}

void nfp_flower_qos_init(struct nfp_app *app)
{
	struct nfp_flower_priv *fl_priv = app->priv;

	spin_lock_init(&fl_priv->qos_stats_lock);
	INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
}

void nfp_flower_qos_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *fl_priv = app->priv;

	cancel_delayed_work_sync(&fl_priv->qos_stats_work);
}

int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
				 struct tc_cls_matchall_offload *flow)
{
	struct netlink_ext_ack *extack = flow->common.extack;
	struct nfp_flower_priv *fl_priv = app->priv;

	if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
		return -EOPNOTSUPP;
	}

	switch (flow->command) {
	case TC_CLSMATCHALL_REPLACE:
		return nfp_flower_install_rate_limiter(app, netdev, flow,
						       extack);
	case TC_CLSMATCHALL_DESTROY:
		return nfp_flower_remove_rate_limiter(app, netdev, flow,
						      extack);
	case TC_CLSMATCHALL_STATS:
		return nfp_flower_stats_rate_limiter(app, netdev, flow,
						     extack);
	default:
		return -EOPNOTSUPP;
	}
}
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -118,6 +118,8 @@ enum flow_action_id {
 	FLOW_ACTION_MARK,
 	FLOW_ACTION_WAKE,
 	FLOW_ACTION_QUEUE,
+	FLOW_ACTION_SAMPLE,
+	FLOW_ACTION_POLICE,
 };
 
 /* This is mirroring enum pedit_header_type definition for easy mapping between
@@ -157,6 +159,16 @@ struct flow_action_entry {
 			u32		index;
 			u8		vf;
 		} queue;
+		struct {				/* FLOW_ACTION_SAMPLE */
+			struct psample_group	*psample_group;
+			u32			rate;
+			u32			trunc_size;
+			bool			truncate;
+		} sample;
+		struct {				/* FLOW_ACTION_POLICE */
+			s64			burst;
+			u64			rate_bytes_ps;
+		} police;
 	};
 };
 
@@ -170,6 +182,17 @@ static inline bool flow_action_has_entries(const struct flow_action *action)
 	return action->num_entries;
 }
 
+/**
+ * flow_offload_has_one_action() - check if exactly one action is present
+ * @action: tc filter flow offload action
+ *
+ * Returns true if exactly one action is present.
+ */
+static inline bool flow_offload_has_one_action(const struct flow_action *action)
+{
+	return action->num_entries == 1;
+}
+
 #define flow_action_for_each(__i, __act, __actions)			\
 	for (__i = 0, __act = &(__actions)->entries[0]; __i < (__actions)->num_entries; __act = &(__actions)->entries[++__i])
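As a usage sketch of the extended structure, a driver translating a rule's
action list could switch on the new IDs; the function below is hypothetical:

	static int foo_parse_actions(const struct flow_rule *rule)
	{
		const struct flow_action_entry *act;
		int i;

		flow_action_for_each(i, act, &rule->action) {
			switch (act->id) {
			case FLOW_ACTION_POLICE:
				/* act->police.rate_bytes_ps, act->police.burst */
				break;
			case FLOW_ACTION_SAMPLE:
				/* act->sample.rate, act->sample.psample_group,
				 * act->sample.truncate / trunc_size
				 */
				break;
			default:
				return -EOPNOTSUPP;
			}
		}
		return 0;
	}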
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -100,6 +100,11 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 		 struct tcf_result *res, bool compat_mode);
 
 #else
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+	return false;
+}
+
 static inline
 int tcf_block_get(struct tcf_block **p_block,
 		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
@@ -371,30 +376,6 @@ static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
 #endif
 }
 
-/**
- * tcf_exts_has_one_action - check if exactly one action is present
- * @exts: tc filter extensions handle
- *
- * Returns true if exactly one action is present.
- */
-static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
-{
-#ifdef CONFIG_NET_CLS_ACT
-	return exts->nr_actions == 1;
-#else
-	return false;
-#endif
-}
-
-static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
-{
-#ifdef CONFIG_NET_CLS_ACT
-	return exts->actions[0];
-#else
-	return NULL;
-#endif
-}
-
 /**
  * tcf_exts_exec - execute tc filter extensions
  * @skb: socket buffer
@@ -648,6 +629,7 @@ struct tc_cls_common_offload {
 	u32 chain_index;
 	__be16 protocol;
 	u32 prio;
+	struct tcf_block *block;
 	struct netlink_ext_ack *extack;
 };
 
@@ -749,11 +731,13 @@ static inline bool tc_in_hw(u32 flags)
 static inline void
 tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
 			   const struct tcf_proto *tp, u32 flags,
+			   struct tcf_block *block,
 			   struct netlink_ext_ack *extack)
 {
 	cls_common->chain_index = tp->chain->index;
 	cls_common->protocol = tp->protocol;
 	cls_common->prio = tp->prio;
+	cls_common->block = block;
 	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
 		cls_common->extack = extack;
 }
@@ -784,12 +768,14 @@ tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
 enum tc_matchall_command {
 	TC_CLSMATCHALL_REPLACE,
 	TC_CLSMATCHALL_DESTROY,
+	TC_CLSMATCHALL_STATS,
 };
 
 struct tc_cls_matchall_offload {
 	struct tc_cls_common_offload common;
 	enum tc_matchall_command command;
-	struct tcf_exts *exts;
+	struct flow_rule *rule;
+	struct flow_stats stats;
 	unsigned long cookie;
 };
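On the driver side, the new TC_CLSMATCHALL_STATS command is answered by
filling flow->stats; cls_matchall then folds the result back into the actions
via tcf_exts_stats_update(). A schematic fragment of a block callback, where
bytes/pkts/lastused stand in for the driver's own hardware counters:

	/* inside a driver's TC_SETUP_CLSMATCHALL handler */
	case TC_CLSMATCHALL_STATS:
		flow_stats_update(&flow->stats, bytes, pkts, lastused);
		return 0;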
--- /dev/null
+++ b/include/net/tc_act/tc_police.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_TC_POLICE_H
#define __NET_TC_POLICE_H

#include <net/act_api.h>

struct tcf_police_params {
	int			tcfp_result;
	u32			tcfp_ewma_rate;
	s64			tcfp_burst;
	u32			tcfp_mtu;
	s64			tcfp_mtu_ptoks;
	struct psched_ratecfg	rate;
	bool			rate_present;
	struct psched_ratecfg	peak;
	bool			peak_present;
	struct rcu_head rcu;
};

struct tcf_police {
	struct tc_action	common;
	struct tcf_police_params __rcu *params;

	spinlock_t		tcfp_lock ____cacheline_aligned_in_smp;
	s64			tcfp_toks;
	s64			tcfp_ptoks;
	s64			tcfp_t_c;
};

#define to_police(pc) ((struct tcf_police *)pc)

/* old policer structure from before tc actions */
struct tc_police_compat {
	u32			index;
	int			action;
	u32			limit;
	u32			burst;
	u32			mtu;
	struct tc_ratespec	rate;
	struct tc_ratespec	peakrate;
};

static inline bool is_tcf_police(const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	if (act->ops && act->ops->id == TCA_ID_POLICE)
		return true;
#endif
	return false;
}

static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act)
{
	struct tcf_police *police = to_police(act);
	struct tcf_police_params *params;

	params = rcu_dereference_bh(police->params);
	return params->rate.rate_bytes_ps;
}

static inline s64 tcf_police_tcfp_burst(const struct tc_action *act)
{
	struct tcf_police *police = to_police(act);
	struct tcf_police_params *params;

	params = rcu_dereference_bh(police->params);
	return params->tcfp_burst;
}

#endif /* __NET_TC_POLICE_H */
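Note that the two getters read the action's RCU-managed parameters with
rcu_dereference_bh(), so they should only be called from a context where the
parameters cannot be freed underneath the reader (the action otherwise pinned,
as in tc_setup_flow_action() below), and is_tcf_police() should gate them. A
minimal sketch of the intended calling pattern:

	if (is_tcf_police(act)) {
		u64 rate = tcf_police_rate_bytes_ps(act);	/* bytes/s */
		s64 burst = tcf_police_tcfp_burst(act);		/* duration */

		/* hand rate/burst to the driver's intermediate repr */
	}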
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -778,27 +778,25 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
 	struct dsa_mall_tc_entry *mall_tc_entry;
 	__be16 protocol = cls->common.protocol;
 	struct dsa_switch *ds = dp->ds;
-	struct net_device *to_dev;
-	const struct tc_action *a;
+	struct flow_action_entry *act;
 	struct dsa_port *to_dp;
 	int err = -EOPNOTSUPP;
 
 	if (!ds->ops->port_mirror_add)
 		return err;
 
-	if (!tcf_exts_has_one_action(cls->exts))
+	if (!flow_offload_has_one_action(&cls->rule->action))
 		return err;
 
-	a = tcf_exts_first_action(cls->exts);
+	act = &cls->rule->action.entries[0];
 
-	if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
+	if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
 		struct dsa_mall_mirror_tc_entry *mirror;
 
-		to_dev = tcf_mirred_dev(a);
-		if (!to_dev)
+		if (!act->dev)
 			return -EINVAL;
 
-		if (!dsa_slave_dev_check(to_dev))
+		if (!dsa_slave_dev_check(act->dev))
 			return -EOPNOTSUPP;
 
 		mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
@@ -809,7 +807,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
 		mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
 		mirror = &mall_tc_entry->mirror;
 
-		to_dp = dsa_slave_to_port(to_dev);
+		to_dp = dsa_slave_to_port(act->dev);
 
 		mirror->to_local_port = to_dp->index;
 		mirror->ingress = ingress;
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -22,42 +22,7 @@
 #include <net/act_api.h>
 #include <net/netlink.h>
 #include <net/pkt_cls.h>
-
-struct tcf_police_params {
-	int			tcfp_result;
-	u32			tcfp_ewma_rate;
-	s64			tcfp_burst;
-	u32			tcfp_mtu;
-	s64			tcfp_mtu_ptoks;
-	struct psched_ratecfg	rate;
-	bool			rate_present;
-	struct psched_ratecfg	peak;
-	bool			peak_present;
-	struct rcu_head rcu;
-};
-
-struct tcf_police {
-	struct tc_action	common;
-	struct tcf_police_params __rcu *params;
-
-	spinlock_t		tcfp_lock ____cacheline_aligned_in_smp;
-	s64			tcfp_toks;
-	s64			tcfp_ptoks;
-	s64			tcfp_t_c;
-};
-
-#define to_police(pc) ((struct tcf_police *)pc)
-
-/* old policer structure from before tc actions */
-struct tc_police_compat {
-	u32			index;
-	int			action;
-	u32			limit;
-	u32			burst;
-	u32			mtu;
-	struct tc_ratespec	rate;
-	struct tc_ratespec	peakrate;
-};
+#include <net/tc_act/tc_police.h>
 
 /* Each policer is serialized by its individual spinlock */
@@ -317,6 +282,20 @@ static void tcf_police_cleanup(struct tc_action *a)
 		kfree_rcu(p, rcu);
 }
 
+static void tcf_police_stats_update(struct tc_action *a,
+				    u64 bytes, u32 packets,
+				    u64 lastuse, bool hw)
+{
+	struct tcf_police *police = to_police(a);
+	struct tcf_t *tm = &police->tcf_tm;
+
+	_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
+	if (hw)
+		_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
+				   bytes, packets);
+	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
+}
+
 static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
 			   int bind, int ref)
 {
@@ -380,6 +359,7 @@ static struct tc_action_ops act_police_ops = {
 	.kind		= "police",
 	.id		= TCA_ID_POLICE,
 	.owner		= THIS_MODULE,
+	.stats_update	= tcf_police_stats_update,
 	.act		= tcf_police_act,
 	.dump		= tcf_police_dump,
 	.init		= tcf_police_init,
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -37,6 +37,8 @@
 #include <net/tc_act/tc_tunnel_key.h>
 #include <net/tc_act/tc_csum.h>
 #include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_police.h>
+#include <net/tc_act/tc_sample.h>
 #include <net/tc_act/tc_skbedit.h>
 
 extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
@@ -3257,6 +3259,18 @@ int tc_setup_flow_action(struct flow_action *flow_action,
 		} else if (is_tcf_skbedit_mark(act)) {
 			entry->id = FLOW_ACTION_MARK;
 			entry->mark = tcf_skbedit_mark(act);
+		} else if (is_tcf_sample(act)) {
+			entry->id = FLOW_ACTION_SAMPLE;
+			entry->sample.psample_group =
+				tcf_sample_psample_group(act);
+			entry->sample.trunc_size = tcf_sample_trunc_size(act);
+			entry->sample.truncate = tcf_sample_truncate(act);
+			entry->sample.rate = tcf_sample_rate(act);
+		} else if (is_tcf_police(act)) {
+			entry->id = FLOW_ACTION_POLICE;
+			entry->police.burst = tcf_police_tcfp_burst(act);
+			entry->police.rate_bytes_ps =
+				tcf_police_rate_bytes_ps(act);
 		} else {
 			goto err_out;
 		}
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -157,7 +157,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 	skip_sw = prog && tc_skip_sw(prog->gen_flags);
 	obj = prog ?: oldprog;
 
-	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags,
+	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, block,
 				   extack);
 	cls_bpf.command = TC_CLSBPF_OFFLOAD;
 	cls_bpf.exts = &obj->exts;
@@ -227,7 +227,8 @@ static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_bpf_offload cls_bpf = {};
 
-	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
+	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, block,
+				   NULL);
 	cls_bpf.command = TC_CLSBPF_STATS;
 	cls_bpf.exts = &prog->exts;
 	cls_bpf.prog = prog->filter;
@@ -669,7 +670,7 @@ static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
 			continue;
 
 		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
-					   extack);
+					   block, extack);
 		cls_bpf.command = TC_CLSBPF_OFFLOAD;
 		cls_bpf.exts = &prog->exts;
 		cls_bpf.prog = add ? prog->filter : NULL;
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -389,7 +389,8 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
 	if (!rtnl_held)
 		rtnl_lock();
 
-	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
+	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, block,
+				   extack);
 	cls_flower.command = TC_CLSFLOWER_DESTROY;
 	cls_flower.cookie = (unsigned long) f;
@@ -422,7 +423,8 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
 		goto errout;
 	}
 
-	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
+	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, block,
+				   extack);
 	cls_flower.command = TC_CLSFLOWER_REPLACE;
 	cls_flower.cookie = (unsigned long) f;
 	cls_flower.rule->match.dissector = &f->mask->dissector;
@@ -478,7 +480,8 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
 	if (!rtnl_held)
 		rtnl_lock();
 
-	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
+	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, block,
+				   NULL);
 	cls_flower.command = TC_CLSFLOWER_STATS;
 	cls_flower.cookie = (unsigned long) f;
 	cls_flower.classid = f->res.classid;
@@ -1757,7 +1760,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
 		}
 
 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
-					   extack);
+					   block, extack);
 		cls_flower.command = add ?
 			TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
 		cls_flower.cookie = (unsigned long)f;
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -71,7 +71,8 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
 	struct tc_cls_matchall_offload cls_mall = {};
 	struct tcf_block *block = tp->chain->block;
 
-	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
+	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block,
+				   extack);
 	cls_mall.command = TC_CLSMATCHALL_DESTROY;
 	cls_mall.cookie = cookie;
 
@@ -89,12 +90,30 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
 	bool skip_sw = tc_skip_sw(head->flags);
 	int err;
 
-	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
+	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
+	if (!cls_mall.rule)
+		return -ENOMEM;
+
+	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block,
+				   extack);
 	cls_mall.command = TC_CLSMATCHALL_REPLACE;
-	cls_mall.exts = &head->exts;
 	cls_mall.cookie = cookie;
 
+	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
+	if (err) {
+		kfree(cls_mall.rule);
+		mall_destroy_hw_filter(tp, head, cookie, NULL);
+		if (skip_sw)
+			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
+		else
+			err = 0;
+
+		return err;
+	}
+
 	err = tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, skip_sw);
+	kfree(cls_mall.rule);
+
 	if (err < 0) {
 		mall_destroy_hw_filter(tp, head, cookie, NULL);
 		return err;
@@ -272,13 +291,28 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
 	if (tc_skip_hw(head->flags))
 		return 0;
 
-	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
+	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
+	if (!cls_mall.rule)
+		return -ENOMEM;
+
+	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block,
+				   extack);
 	cls_mall.command = add ?
 		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
-	cls_mall.exts = &head->exts;
 	cls_mall.cookie = (unsigned long)head;
 
+	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
+	if (err) {
+		kfree(cls_mall.rule);
+
+		if (add && tc_skip_sw(head->flags)) {
+			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
+			return err;
+		}
+	}
+
 	err = cb(TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv);
+	kfree(cls_mall.rule);
+
 	if (err) {
 		if (add && tc_skip_sw(head->flags))
 			return err;
@@ -290,6 +324,24 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
 	return 0;
 }
 
+static void mall_stats_hw_filter(struct tcf_proto *tp,
+				 struct cls_mall_head *head,
+				 unsigned long cookie)
+{
+	struct tc_cls_matchall_offload cls_mall = {};
+	struct tcf_block *block = tp->chain->block;
+
+	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block,
+				   NULL);
+	cls_mall.command = TC_CLSMATCHALL_STATS;
+	cls_mall.cookie = cookie;
+
+	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false);
+
+	tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
+			      cls_mall.stats.pkts, cls_mall.stats.lastused);
+}
+
 static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
 		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
 {
@@ -301,6 +353,9 @@ static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
 	if (!head)
 		return skb->len;
 
+	if (!tc_skip_hw(head->flags))
+		mall_stats_hw_filter(tp, head, (unsigned long)head);
+
 	t->tcm_handle = head->handle;
 
 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -485,7 +485,8 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
 
-	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
+	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, block,
+				   extack);
 	cls_u32.command = TC_CLSU32_DELETE_HNODE;
 	cls_u32.hnode.divisor = h->divisor;
 	cls_u32.hnode.handle = h->handle;
@@ -503,7 +504,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
 	bool offloaded = false;
 	int err;
 
-	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
+	tc_cls_common_offload_init(&cls_u32.common, tp, flags, block, extack);
 	cls_u32.command = TC_CLSU32_NEW_HNODE;
 	cls_u32.hnode.divisor = h->divisor;
 	cls_u32.hnode.handle = h->handle;
@@ -529,7 +530,8 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
 
-	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
+	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, block,
+				   extack);
 	cls_u32.command = TC_CLSU32_DELETE_KNODE;
 	cls_u32.knode.handle = n->handle;
 
@@ -546,7 +548,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 	bool skip_sw = tc_skip_sw(flags);
 	int err;
 
-	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
+	tc_cls_common_offload_init(&cls_u32.common, tp, flags, block, extack);
 	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
 	cls_u32.knode.handle = n->handle;
 	cls_u32.knode.fshift = n->fshift;
@@ -1170,10 +1172,12 @@ static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
 			       bool add, tc_setup_cb_t *cb, void *cb_priv,
 			       struct netlink_ext_ack *extack)
 {
+	struct tcf_block *block = tp->chain->block;
 	struct tc_cls_u32_offload cls_u32 = {};
 	int err;
 
-	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
+	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, block,
+				   extack);
 	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
 	cls_u32.hnode.divisor = ht->divisor;
 	cls_u32.hnode.handle = ht->handle;
@@ -1195,7 +1199,8 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 	struct tc_cls_u32_offload cls_u32 = {};
 	int err;
 
-	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
+	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, block,
+				   extack);
 	cls_u32.command = add ?
 		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
 	cls_u32.knode.handle = n->handle;