Commit 7e6a95d3 authored by David S. Miller

Merge branch 'net-act_police-offload-support'

Jakub Kicinski says:

===================
net: act_police offload support

This set starts by converting cls_matchall to the new flow offload
infrastructure. It so happens that all drivers implementing cls_matchall
offload today also offload cls_flower, so it's a little easier for
them to handle the actions in the unified flow_rule format, even though
in cls_matchall there is no flow to speak of. If a driver ever appears
which would prefer the old, direct access to TC exts, we can add the
pointer in the offload structure back and support both.

Next, act_police is added to the actions supported by the flow offload API.

NFP support for act_police offload is added as the final step.  The flower
firmware is configured to perform TX rate limiting in a way which matches
act_police's behaviour.  It does not use DMA.IN back pressure, and
instead drops packets after they have already been DMAed into the NIC.
IOW it uses our standard traffic policing implementation; future patches
will extend it to other ports and traffic directions.
===================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c8f8207c 5fb5c395
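Before the diff, a hypothetical sketch (not part of this series) of the driver-facing side of the API introduced here: a matchall offload now carries a flow_rule whose single action may be FLOW_ACTION_POLICE, and the new TC_CLSMATCHALL_STATS command lets the driver report counters back. All foo_* names below are illustrative stand-ins for device-specific code; only the types, commands and fields come from this series.

/*
 * Hypothetical driver-side sketch: consuming the flow_rule-based matchall
 * offload to program a policer.  The foo_hw_*() helpers stand in for real
 * device plumbing.
 */
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

static int foo_hw_policer_add(struct net_device *dev, unsigned long cookie,
                              u64 rate_bytes_ps, s64 burst)
{
        return 0;       /* program the hardware rate limiter here */
}

static int foo_hw_policer_del(struct net_device *dev, unsigned long cookie)
{
        return 0;       /* remove the hardware rate limiter here */
}

static int foo_hw_policer_stats(struct net_device *dev, unsigned long cookie,
                                struct flow_stats *stats)
{
        return 0;       /* read counters from hardware into *stats here */
}

static int foo_setup_tc_matchall(struct net_device *dev,
                                 struct tc_cls_matchall_offload *f)
{
        const struct flow_action_entry *act;

        switch (f->command) {
        case TC_CLSMATCHALL_REPLACE:
                /* same single-action restriction mlxsw and dsa apply below */
                if (!flow_offload_has_one_action(&f->rule->action))
                        return -EOPNOTSUPP;
                act = &f->rule->action.entries[0];
                if (act->id != FLOW_ACTION_POLICE)
                        return -EOPNOTSUPP;
                /* rate_bytes_ps is bytes/sec; burst carries act_police's
                 * ns-scaled token bucket depth, not a raw byte count.
                 */
                return foo_hw_policer_add(dev, f->cookie,
                                          act->police.rate_bytes_ps,
                                          act->police.burst);
        case TC_CLSMATCHALL_DESTROY:
                return foo_hw_policer_del(dev, f->cookie);
        case TC_CLSMATCHALL_STATS:
                /* fill f->stats; cls_matchall folds it back into the action
                 * via tcf_exts_stats_update()
                 */
                return foo_hw_policer_stats(dev, f->cookie, &f->stats);
        default:
                return -EOPNOTSUPP;
        }
}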
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1269,21 +1269,19 @@ mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
 static int
 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
                                       struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
-                                      const struct tc_action *a,
+                                      const struct flow_action_entry *act,
                                       bool ingress)
 {
         enum mlxsw_sp_span_type span_type;
-        struct net_device *to_dev;
 
-        to_dev = tcf_mirred_dev(a);
-        if (!to_dev) {
+        if (!act->dev) {
                 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
                 return -EINVAL;
         }
 
         mirror->ingress = ingress;
         span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
-        return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
+        return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
                                         true, &mirror->span_id);
 }
@@ -1302,7 +1300,7 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
 static int
 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
                                       struct tc_cls_matchall_offload *cls,
-                                      const struct tc_action *a,
+                                      const struct flow_action_entry *act,
                                       bool ingress)
 {
         int err;
@@ -1313,18 +1311,18 @@ mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
                 netdev_err(mlxsw_sp_port->dev, "sample already active\n");
                 return -EEXIST;
         }
-        if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
+        if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
                 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
                 return -EOPNOTSUPP;
         }
 
         rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
-                           tcf_sample_psample_group(a));
-        mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
-        mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
-        mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
+                           act->sample.psample_group);
+        mlxsw_sp_port->sample->truncate = act->sample.truncate;
+        mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
+        mlxsw_sp_port->sample->rate = act->sample.rate;
 
-        err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
+        err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
         if (err)
                 goto err_port_sample_set;
         return 0;
@@ -1350,10 +1348,10 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
 {
         struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
         __be16 protocol = f->common.protocol;
-        const struct tc_action *a;
+        struct flow_action_entry *act;
         int err;
 
-        if (!tcf_exts_has_one_action(f->exts)) {
+        if (!flow_offload_has_one_action(&f->rule->action)) {
                 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
                 return -EOPNOTSUPP;
         }
@@ -1363,19 +1361,21 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
                 return -ENOMEM;
 
         mall_tc_entry->cookie = f->cookie;
-        a = tcf_exts_first_action(f->exts);
+        act = &f->rule->action.entries[0];
 
-        if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
+        if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
                 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
 
                 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
                 mirror = &mall_tc_entry->mirror;
                 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
-                                                            mirror, a, ingress);
-        } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
+                                                            mirror, act,
+                                                            ingress);
+        } else if (act->id == FLOW_ACTION_SAMPLE &&
+                   protocol == htons(ETH_P_ALL)) {
                 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
                 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
-                                                            a, ingress);
+                                                            act, ingress);
         } else {
                 err = -EOPNOTSUPP;
         }
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -43,7 +43,8 @@ nfp-objs += \
             flower/match.o \
             flower/metadata.o \
             flower/offload.o \
-            flower/tunnel_conf.o
+            flower/tunnel_conf.o \
+            flower/qos_conf.o
 endif
 
 ifeq ($(CONFIG_BPF_SYSCALL),y)
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -278,6 +278,9 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
         case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
                 nfp_tunnel_keep_alive(app, skb);
                 break;
+        case NFP_FLOWER_CMSG_TYPE_QOS_STATS:
+                nfp_flower_stats_rlim_reply(app, skb);
+                break;
         case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
                 if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
                         skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -416,6 +416,9 @@ enum nfp_flower_cmsg_type_port {
         NFP_FLOWER_CMSG_TYPE_TUN_IPS = 14,
         NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15,
         NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16,
+        NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18,
+        NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
+        NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
         NFP_FLOWER_CMSG_TYPE_MAX = 32,
 };
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -776,6 +776,9 @@ static int nfp_flower_init(struct nfp_app *app)
                 nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
         }
 
+        if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
+                nfp_flower_qos_init(app);
+
         INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
         INIT_LIST_HEAD(&app_priv->non_repr_priv);
 
@@ -799,6 +802,9 @@ static void nfp_flower_clean(struct nfp_app *app)
         skb_queue_purge(&app_priv->cmsg_skbs_low);
         flush_work(&app_priv->cmsg_work);
 
+        if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
+                nfp_flower_qos_cleanup(app);
+
         if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
                 nfp_flower_lag_cleanup(&app_priv->nfp_lag);
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -5,6 +5,7 @@
 #define __NFP_FLOWER_H__ 1
 
 #include "cmsg.h"
+#include "../nfp_net.h"
 
 #include <linux/circ_buf.h>
 #include <linux/hashtable.h>
@@ -39,6 +40,7 @@ struct nfp_app;
 #define NFP_FL_NBI_MTU_SETTING BIT(1)
 #define NFP_FL_FEATS_GENEVE_OPT BIT(2)
 #define NFP_FL_FEATS_VLAN_PCP BIT(3)
+#define NFP_FL_FEATS_VF_RLIM BIT(4)
 #define NFP_FL_FEATS_FLOW_MOD BIT(5)
 #define NFP_FL_FEATS_FLOW_MERGE BIT(30)
 #define NFP_FL_FEATS_LAG BIT(31)
@@ -157,6 +159,9 @@ struct nfp_fl_internal_ports {
  * @active_mem_unit: Current active memory unit for flower rules
  * @total_mem_units: Total number of available memory units for flower rules
  * @internal_ports: Internal port ids used in offloaded rules
+ * @qos_stats_work: Workqueue for qos stats processing
+ * @qos_rate_limiters: Current active qos rate limiters
+ * @qos_stats_lock: Lock on qos stats updates
  */
 struct nfp_flower_priv {
         struct nfp_app *app;
@@ -185,6 +190,23 @@ struct nfp_flower_priv {
         unsigned int active_mem_unit;
         unsigned int total_mem_units;
         struct nfp_fl_internal_ports internal_ports;
+        struct delayed_work qos_stats_work;
+        unsigned int qos_rate_limiters;
+        spinlock_t qos_stats_lock; /* Protect the qos stats */
+};
+
+/**
+ * struct nfp_fl_qos - Flower APP priv data for quality of service
+ * @netdev_port_id: NFP port number of repr with qos info
+ * @curr_stats: Currently stored stats updates for qos info
+ * @prev_stats: Previously stored updates for qos info
+ * @last_update: Stored time when last stats were updated
+ */
+struct nfp_fl_qos {
+        u32 netdev_port_id;
+        struct nfp_stat_pair curr_stats;
+        struct nfp_stat_pair prev_stats;
+        u64 last_update;
 };
 
 /**
@@ -194,6 +216,7 @@ struct nfp_flower_priv {
  * @mac_offloaded: Flag indicating a MAC address is offloaded for repr
 * @offloaded_mac_addr: MAC address that has been offloaded for repr
 * @mac_list: List entry of reprs that share the same offloaded MAC
+ * @qos_table: Stored info on filters implementing qos
  */
 struct nfp_flower_repr_priv {
         struct nfp_repr *nfp_repr;
@@ -201,6 +224,7 @@ struct nfp_flower_repr_priv {
         bool mac_offloaded;
         u8 offloaded_mac_addr[ETH_ALEN];
         struct list_head mac_list;
+        struct nfp_fl_qos qos_table;
 };
 
 /**
@@ -366,6 +390,11 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
                                        struct nfp_fl_pre_lag *pre_act);
 int nfp_flower_lag_get_output_id(struct nfp_app *app,
                                  struct net_device *master);
+void nfp_flower_qos_init(struct nfp_app *app);
+void nfp_flower_qos_cleanup(struct nfp_app *app);
+int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
+                                 struct tc_cls_matchall_offload *flow);
+void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
 int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
                                        struct net_device *netdev,
                                        unsigned long event);
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1185,6 +1185,9 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
         case TC_SETUP_CLSFLOWER:
                 return nfp_flower_repr_offload(repr->app, repr->netdev,
                                                type_data);
+        case TC_SETUP_CLSMATCHALL:
+                return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
+                                                    type_data);
         default:
                 return -EOPNOTSUPP;
         }
(collapsed in this view: the diff adding the new drivers/net/ethernet/netronome/nfp/flower/qos_conf.c is not shown)
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -118,6 +118,8 @@ enum flow_action_id {
         FLOW_ACTION_MARK,
         FLOW_ACTION_WAKE,
         FLOW_ACTION_QUEUE,
+        FLOW_ACTION_SAMPLE,
+        FLOW_ACTION_POLICE,
 };
 
 /* This is mirroring enum pedit_header_type definition for easy mapping between
@@ -157,6 +159,16 @@ struct flow_action_entry {
                         u32 index;
                         u8 vf;
                 } queue;
+                struct { /* FLOW_ACTION_SAMPLE */
+                        struct psample_group *psample_group;
+                        u32 rate;
+                        u32 trunc_size;
+                        bool truncate;
+                } sample;
+                struct { /* FLOW_ACTION_POLICE */
+                        s64 burst;
+                        u64 rate_bytes_ps;
+                } police;
         };
 };
 
@@ -170,6 +182,17 @@ static inline bool flow_action_has_entries(const struct flow_action *action)
         return action->num_entries;
 }
 
+/**
+ * flow_action_has_one_action() - check if exactly one action is present
+ * @action: tc filter flow offload action
+ *
+ * Returns true if exactly one action is present.
+ */
+static inline bool flow_offload_has_one_action(const struct flow_action *action)
+{
+        return action->num_entries == 1;
+}
+
 #define flow_action_for_each(__i, __act, __actions) \
         for (__i = 0, __act = &(__actions)->entries[0]; __i < (__actions)->num_entries; __act = &(__actions)->entries[++__i])
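Drivers that accept more than one entry would walk the array with the flow_action_for_each() iterator above. A minimal, hypothetical validation pass over the two entry types this set adds; the foo_* name and the specific checks are illustrative only, not from this series:

#include <linux/errno.h>
#include <net/flow_offload.h>

/* Hypothetical sketch: accept only the sample/police entries introduced
 * above, rejecting anything else.
 */
static int foo_validate_actions(const struct flow_action *action)
{
        const struct flow_action_entry *act;
        int i;

        flow_action_for_each(i, act, action) {
                switch (act->id) {
                case FLOW_ACTION_POLICE:
                        /* a zero rate cannot be programmed meaningfully */
                        if (!act->police.rate_bytes_ps)
                                return -EINVAL;
                        break;
                case FLOW_ACTION_SAMPLE:
                        /* samples are delivered to a psample group */
                        if (!act->sample.psample_group)
                                return -EINVAL;
                        break;
                default:
                        return -EOPNOTSUPP;
                }
        }
        return 0;
}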
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -100,6 +100,11 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                  struct tcf_result *res, bool compat_mode);
 
 #else
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+        return false;
+}
+
 static inline
 int tcf_block_get(struct tcf_block **p_block,
                   struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
@@ -371,30 +376,6 @@ static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
 #endif
 }
 
-/**
- * tcf_exts_has_one_action - check if exactly one action is present
- * @exts: tc filter extensions handle
- *
- * Returns true if exactly one action is present.
- */
-static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
-{
-#ifdef CONFIG_NET_CLS_ACT
-        return exts->nr_actions == 1;
-#else
-        return false;
-#endif
-}
-
-static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
-{
-#ifdef CONFIG_NET_CLS_ACT
-        return exts->actions[0];
-#else
-        return NULL;
-#endif
-}
-
 /**
  * tcf_exts_exec - execute tc filter extensions
  * @skb: socket buffer
@@ -648,6 +629,7 @@ struct tc_cls_common_offload {
         u32 chain_index;
         __be16 protocol;
         u32 prio;
+        struct tcf_block *block;
         struct netlink_ext_ack *extack;
 };
 
@@ -749,11 +731,13 @@ static inline bool tc_in_hw(u32 flags)
 static inline void
 tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
                            const struct tcf_proto *tp, u32 flags,
+                           struct tcf_block *block,
                            struct netlink_ext_ack *extack)
 {
         cls_common->chain_index = tp->chain->index;
         cls_common->protocol = tp->protocol;
         cls_common->prio = tp->prio;
+        cls_common->block = block;
         if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
                 cls_common->extack = extack;
 }
@@ -784,12 +768,14 @@ tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
 enum tc_matchall_command {
         TC_CLSMATCHALL_REPLACE,
         TC_CLSMATCHALL_DESTROY,
+        TC_CLSMATCHALL_STATS,
 };
 
 struct tc_cls_matchall_offload {
         struct tc_cls_common_offload common;
         enum tc_matchall_command command;
-        struct tcf_exts *exts;
+        struct flow_rule *rule;
+        struct flow_stats stats;
         unsigned long cookie;
 };
new file: include/net/tc_act/tc_police.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_TC_POLICE_H
#define __NET_TC_POLICE_H

#include <net/act_api.h>

struct tcf_police_params {
        int tcfp_result;
        u32 tcfp_ewma_rate;
        s64 tcfp_burst;
        u32 tcfp_mtu;
        s64 tcfp_mtu_ptoks;
        struct psched_ratecfg rate;
        bool rate_present;
        struct psched_ratecfg peak;
        bool peak_present;
        struct rcu_head rcu;
};

struct tcf_police {
        struct tc_action common;
        struct tcf_police_params __rcu *params;

        spinlock_t tcfp_lock ____cacheline_aligned_in_smp;
        s64 tcfp_toks;
        s64 tcfp_ptoks;
        s64 tcfp_t_c;
};

#define to_police(pc) ((struct tcf_police *)pc)

/* old policer structure from before tc actions */
struct tc_police_compat {
        u32 index;
        int action;
        u32 limit;
        u32 burst;
        u32 mtu;
        struct tc_ratespec rate;
        struct tc_ratespec peakrate;
};

static inline bool is_tcf_police(const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
        if (act->ops && act->ops->id == TCA_ID_POLICE)
                return true;
#endif
        return false;
}

static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act)
{
        struct tcf_police *police = to_police(act);
        struct tcf_police_params *params;

        params = rcu_dereference_bh(police->params);
        return params->rate.rate_bytes_ps;
}

static inline s64 tcf_police_tcfp_burst(const struct tc_action *act)
{
        struct tcf_police *police = to_police(act);
        struct tcf_police_params *params;

        params = rcu_dereference_bh(police->params);
        return params->tcfp_burst;
}

#endif /* __NET_TC_POLICE_H */
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -778,27 +778,25 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
         struct dsa_mall_tc_entry *mall_tc_entry;
         __be16 protocol = cls->common.protocol;
         struct dsa_switch *ds = dp->ds;
-        struct net_device *to_dev;
-        const struct tc_action *a;
+        struct flow_action_entry *act;
         struct dsa_port *to_dp;
         int err = -EOPNOTSUPP;
 
         if (!ds->ops->port_mirror_add)
                 return err;
 
-        if (!tcf_exts_has_one_action(cls->exts))
+        if (!flow_offload_has_one_action(&cls->rule->action))
                 return err;
 
-        a = tcf_exts_first_action(cls->exts);
+        act = &cls->rule->action.entries[0];
 
-        if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
+        if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
                 struct dsa_mall_mirror_tc_entry *mirror;
 
-                to_dev = tcf_mirred_dev(a);
-                if (!to_dev)
+                if (!act->dev)
                         return -EINVAL;
 
-                if (!dsa_slave_dev_check(to_dev))
+                if (!dsa_slave_dev_check(act->dev))
                         return -EOPNOTSUPP;
 
                 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
@@ -809,7 +807,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
                 mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
                 mirror = &mall_tc_entry->mirror;
 
-                to_dp = dsa_slave_to_port(to_dev);
+                to_dp = dsa_slave_to_port(act->dev);
 
                 mirror->to_local_port = to_dp->index;
                 mirror->ingress = ingress;
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -22,42 +22,7 @@
 #include <net/act_api.h>
 #include <net/netlink.h>
 #include <net/pkt_cls.h>
-
-struct tcf_police_params {
-        int tcfp_result;
-        u32 tcfp_ewma_rate;
-        s64 tcfp_burst;
-        u32 tcfp_mtu;
-        s64 tcfp_mtu_ptoks;
-        struct psched_ratecfg rate;
-        bool rate_present;
-        struct psched_ratecfg peak;
-        bool peak_present;
-        struct rcu_head rcu;
-};
-
-struct tcf_police {
-        struct tc_action common;
-        struct tcf_police_params __rcu *params;
-
-        spinlock_t tcfp_lock ____cacheline_aligned_in_smp;
-        s64 tcfp_toks;
-        s64 tcfp_ptoks;
-        s64 tcfp_t_c;
-};
-
-#define to_police(pc) ((struct tcf_police *)pc)
-
-/* old policer structure from before tc actions */
-struct tc_police_compat {
-        u32 index;
-        int action;
-        u32 limit;
-        u32 burst;
-        u32 mtu;
-        struct tc_ratespec rate;
-        struct tc_ratespec peakrate;
-};
+#include <net/tc_act/tc_police.h>
 
 /* Each policer is serialized by its individual spinlock */
@@ -317,6 +282,20 @@ static void tcf_police_cleanup(struct tc_action *a)
                 kfree_rcu(p, rcu);
 }
 
+static void tcf_police_stats_update(struct tc_action *a,
+                                    u64 bytes, u32 packets,
+                                    u64 lastuse, bool hw)
+{
+        struct tcf_police *police = to_police(a);
+        struct tcf_t *tm = &police->tcf_tm;
+
+        _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
+        if (hw)
+                _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
+                                   bytes, packets);
+        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
+}
+
 static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
                            int bind, int ref)
 {
@@ -380,6 +359,7 @@ static struct tc_action_ops act_police_ops = {
         .kind = "police",
         .id = TCA_ID_POLICE,
         .owner = THIS_MODULE,
+        .stats_update = tcf_police_stats_update,
         .act = tcf_police_act,
         .dump = tcf_police_dump,
         .init = tcf_police_init,
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -37,6 +37,8 @@
 #include <net/tc_act/tc_tunnel_key.h>
 #include <net/tc_act/tc_csum.h>
 #include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_police.h>
+#include <net/tc_act/tc_sample.h>
 #include <net/tc_act/tc_skbedit.h>
 
 extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
@@ -3257,6 +3259,18 @@ int tc_setup_flow_action(struct flow_action *flow_action,
                 } else if (is_tcf_skbedit_mark(act)) {
                         entry->id = FLOW_ACTION_MARK;
                         entry->mark = tcf_skbedit_mark(act);
+                } else if (is_tcf_sample(act)) {
+                        entry->id = FLOW_ACTION_SAMPLE;
+                        entry->sample.psample_group =
+                                tcf_sample_psample_group(act);
+                        entry->sample.trunc_size = tcf_sample_trunc_size(act);
+                        entry->sample.truncate = tcf_sample_truncate(act);
+                        entry->sample.rate = tcf_sample_rate(act);
+                } else if (is_tcf_police(act)) {
+                        entry->id = FLOW_ACTION_POLICE;
+                        entry->police.burst = tcf_police_tcfp_burst(act);
+                        entry->police.rate_bytes_ps =
+                                tcf_police_rate_bytes_ps(act);
                 } else {
                         goto err_out;
                 }
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -157,7 +157,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
         skip_sw = prog && tc_skip_sw(prog->gen_flags);
         obj = prog ?: oldprog;
 
-        tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags,
+        tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, block,
                                    extack);
         cls_bpf.command = TC_CLSBPF_OFFLOAD;
         cls_bpf.exts = &obj->exts;
@@ -227,7 +227,8 @@ static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
         struct tcf_block *block = tp->chain->block;
         struct tc_cls_bpf_offload cls_bpf = {};
 
-        tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
+        tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, block,
+                                   NULL);
         cls_bpf.command = TC_CLSBPF_STATS;
         cls_bpf.exts = &prog->exts;
         cls_bpf.prog = prog->filter;
@@ -669,7 +670,7 @@ static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
                         continue;
 
                 tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
-                                           extack);
+                                           block, extack);
                 cls_bpf.command = TC_CLSBPF_OFFLOAD;
                 cls_bpf.exts = &prog->exts;
                 cls_bpf.prog = add ? prog->filter : NULL;
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -389,7 +389,8 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
         if (!rtnl_held)
                 rtnl_lock();
 
-        tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
+        tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, block,
+                                   extack);
         cls_flower.command = TC_CLSFLOWER_DESTROY;
         cls_flower.cookie = (unsigned long) f;
 
@@ -422,7 +423,8 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
                 goto errout;
         }
 
-        tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
+        tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, block,
+                                   extack);
         cls_flower.command = TC_CLSFLOWER_REPLACE;
         cls_flower.cookie = (unsigned long) f;
         cls_flower.rule->match.dissector = &f->mask->dissector;
@@ -478,7 +480,8 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
         if (!rtnl_held)
                 rtnl_lock();
 
-        tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
+        tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, block,
+                                   NULL);
         cls_flower.command = TC_CLSFLOWER_STATS;
         cls_flower.cookie = (unsigned long) f;
         cls_flower.classid = f->res.classid;
@@ -1757,7 +1760,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
                 }
 
                 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
-                                           extack);
+                                           block, extack);
                 cls_flower.command = add ?
                         TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
                 cls_flower.cookie = (unsigned long)f;
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -71,7 +71,8 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
         struct tc_cls_matchall_offload cls_mall = {};
         struct tcf_block *block = tp->chain->block;
 
-        tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
+        tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block,
+                                   extack);
         cls_mall.command = TC_CLSMATCHALL_DESTROY;
         cls_mall.cookie = cookie;
 
@@ -89,12 +90,30 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
         bool skip_sw = tc_skip_sw(head->flags);
         int err;
 
-        tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
+        cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
+        if (!cls_mall.rule)
+                return -ENOMEM;
+
+        tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block,
+                                   extack);
         cls_mall.command = TC_CLSMATCHALL_REPLACE;
-        cls_mall.exts = &head->exts;
         cls_mall.cookie = cookie;
 
+        err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
+        if (err) {
+                kfree(cls_mall.rule);
+                mall_destroy_hw_filter(tp, head, cookie, NULL);
+                if (skip_sw)
+                        NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
+                else
+                        err = 0;
+
+                return err;
+        }
+
         err = tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, skip_sw);
+        kfree(cls_mall.rule);
         if (err < 0) {
                 mall_destroy_hw_filter(tp, head, cookie, NULL);
                 return err;
@@ -272,13 +291,28 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
         if (tc_skip_hw(head->flags))
                 return 0;
 
-        tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
+        cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
+        if (!cls_mall.rule)
+                return -ENOMEM;
+
+        tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block,
+                                   extack);
         cls_mall.command = add ?
                 TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
-        cls_mall.exts = &head->exts;
         cls_mall.cookie = (unsigned long)head;
 
+        err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
+        if (err) {
+                kfree(cls_mall.rule);
+                if (add && tc_skip_sw(head->flags)) {
+                        NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
+                        return err;
+                }
+        }
+
         err = cb(TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv);
+        kfree(cls_mall.rule);
         if (err) {
                 if (add && tc_skip_sw(head->flags))
                         return err;
@@ -290,6 +324,24 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
         return 0;
 }
 
+static void mall_stats_hw_filter(struct tcf_proto *tp,
+                                 struct cls_mall_head *head,
+                                 unsigned long cookie)
+{
+        struct tc_cls_matchall_offload cls_mall = {};
+        struct tcf_block *block = tp->chain->block;
+
+        tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, block,
+                                   NULL);
+        cls_mall.command = TC_CLSMATCHALL_STATS;
+        cls_mall.cookie = cookie;
+
+        tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false);
+
+        tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
+                              cls_mall.stats.pkts, cls_mall.stats.lastused);
+}
+
 static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
                      struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
 {
@@ -301,6 +353,9 @@ static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
         if (!head)
                 return skb->len;
 
+        if (!tc_skip_hw(head->flags))
+                mall_stats_hw_filter(tp, head, (unsigned long)head);
+
         t->tcm_handle = head->handle;
 
         nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -485,7 +485,8 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
         struct tcf_block *block = tp->chain->block;
         struct tc_cls_u32_offload cls_u32 = {};
 
-        tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
+        tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, block,
+                                   extack);
         cls_u32.command = TC_CLSU32_DELETE_HNODE;
         cls_u32.hnode.divisor = h->divisor;
         cls_u32.hnode.handle = h->handle;
@@ -503,7 +504,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
         bool offloaded = false;
         int err;
 
-        tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
+        tc_cls_common_offload_init(&cls_u32.common, tp, flags, block, extack);
         cls_u32.command = TC_CLSU32_NEW_HNODE;
         cls_u32.hnode.divisor = h->divisor;
         cls_u32.hnode.handle = h->handle;
@@ -529,7 +530,8 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
         struct tcf_block *block = tp->chain->block;
         struct tc_cls_u32_offload cls_u32 = {};
 
-        tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
+        tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, block,
+                                   extack);
         cls_u32.command = TC_CLSU32_DELETE_KNODE;
         cls_u32.knode.handle = n->handle;
 
@@ -546,7 +548,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
         bool skip_sw = tc_skip_sw(flags);
         int err;
 
-        tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
+        tc_cls_common_offload_init(&cls_u32.common, tp, flags, block, extack);
         cls_u32.command = TC_CLSU32_REPLACE_KNODE;
         cls_u32.knode.handle = n->handle;
         cls_u32.knode.fshift = n->fshift;
@@ -1170,10 +1172,12 @@ static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
                                bool add, tc_setup_cb_t *cb, void *cb_priv,
                                struct netlink_ext_ack *extack)
 {
+        struct tcf_block *block = tp->chain->block;
         struct tc_cls_u32_offload cls_u32 = {};
         int err;
 
-        tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
+        tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, block,
+                                   extack);
         cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
         cls_u32.hnode.divisor = ht->divisor;
         cls_u32.hnode.handle = ht->handle;
@@ -1195,7 +1199,8 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
         struct tc_cls_u32_offload cls_u32 = {};
         int err;
 
-        tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
+        tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, block,
+                                   extack);
         cls_u32.command = add ?
                 TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
         cls_u32.knode.handle = n->handle;