Commit bb5e6a82 authored by David S. Miller's avatar David S. Miller

Merge branch 'net-sched-indirect-tc-block-cb-registration'

Jakub Kicinski says:

====================
net: sched: indirect tc block cb registration

John says:

This patchset introduces an alternative to egdev offload by allowing a
driver to register for block updates when an external device (e.g. tunnel
netdev) is bound to a TC block. Drivers can track new netdevs or register
to existing ones to receive information on such events. Based on this,
they may register for block offload rules using already existing
functions.

The patchset also implements this new indirect block registration in the
NFP driver to allow the offloading of tunnel rules. The use of egdev
offload (which is currently only used for tunnel offload) is subsequently
removed.

RFC v2 -> PATCH
 - removed embedded tracking function from indir block register (now up to
   driver to clean up after itself)
 - refactored NFP code due to recent submissions
 - removed priv list clean function in NFP (list should be cleared by
   indirect block unregisters)

RFC v1->v2:
 - free allocated owner struct in block_owner_clean function
 - add geneve type helper function
 - move test stub in NFP (v1 patch 2) to full tunnel offload
   implementation via indirect blocks (v2 patches 3-8)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d1ce0114 d4b69bad
......@@ -2,7 +2,6 @@
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
......@@ -11,7 +10,6 @@
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "cmsg.h"
#include "main.h"
......@@ -92,18 +90,6 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
return act_size;
}
/* Check whether the egress netdev's tunnel kind matches the action's
 * tunnel type: a VXLAN netdev pairs with NFP_FL_TUNNEL_VXLAN and a
 * geneve netdev with NFP_FL_TUNNEL_GENEVE; any other netdev never
 * matches.
 */
static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
					 enum nfp_flower_tun_type tun_type)
{
	bool matched = false;

	if (netif_is_vxlan(out_dev))
		matched = tun_type == NFP_FL_TUNNEL_VXLAN;
	else if (netif_is_geneve(out_dev))
		matched = tun_type == NFP_FL_TUNNEL_GENEVE;

	return matched;
}
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
......@@ -149,11 +135,12 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
/* Set action output parameters. */
output->flags = cpu_to_be16(tmp_flags);
/* Only offload if egress ports are on the same device as the
* ingress port.
*/
if (!switchdev_port_same_parent_id(in_dev, out_dev))
return -EOPNOTSUPP;
if (nfp_netdev_is_nfp_repr(in_dev)) {
/* Confirm ingress and egress are on same device. */
if (!switchdev_port_same_parent_id(in_dev, out_dev))
return -EOPNOTSUPP;
}
if (!nfp_netdev_is_nfp_repr(out_dev))
return -EOPNOTSUPP;
......@@ -840,9 +827,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
*a_len += sizeof(struct nfp_fl_push_vlan);
} else if (is_tcf_tunnel_set(a)) {
struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
struct nfp_repr *repr = netdev_priv(netdev);
*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
*tun_type = nfp_fl_get_tun_from_act_l4_port(app, a);
if (*tun_type == NFP_FL_TUNNEL_NONE)
return -EOPNOTSUPP;
......
......@@ -8,6 +8,7 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include "../nfp_app.h"
#include "../nfpcore/nfp_cpp.h"
......@@ -499,6 +500,32 @@ static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb)
return skb->len - NFP_FLOWER_CMSG_HLEN;
}
/* Return true when @netdev is a tunnel device of the kind expected by
 * @tun_type (VXLAN or geneve); all other combinations do not match.
 */
static inline bool
nfp_fl_netdev_is_tunnel_type(struct net_device *netdev,
			     enum nfp_flower_tun_type tun_type)
{
	switch (tun_type) {
	case NFP_FL_TUNNEL_VXLAN:
		return netif_is_vxlan(netdev);
	case NFP_FL_TUNNEL_GENEVE:
		return netif_is_geneve(netdev);
	default:
		return false;
	}
}
static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
{
if (!netdev->rtnl_link_ops)
return false;
if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
return true;
if (netif_is_vxlan(netdev))
return true;
if (netif_is_geneve(netdev))
return true;
return false;
}
struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports);
void
......
......@@ -146,23 +146,12 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}
/* Register the representor netdev for egdev callbacks so TC rules whose
 * egress device is this repr can be offered to the driver for offload.
 * The repr's private data is passed back as the callback argument.
 */
static int
nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
{
	return tc_setup_cb_egdev_register(netdev,
					  nfp_flower_setup_tc_egress_cb,
					  netdev_priv(netdev));
}
/* Tear down representor state: free the flower-private area hanging off
 * the repr and drop the egdev callback registration taken at init time.
 */
static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	kfree(repr->app_priv);
	tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
				     netdev_priv(netdev));
}
static void
......@@ -568,6 +557,8 @@ static int nfp_flower_init(struct nfp_app *app)
goto err_cleanup_metadata;
}
INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
return 0;
err_cleanup_metadata:
......@@ -684,6 +675,10 @@ nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
return ret;
}
ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
if (ret & NOTIFY_STOP_MASK)
return ret;
return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}
......@@ -705,7 +700,6 @@ const struct nfp_app_type app_flower = {
.vnic_init = nfp_flower_vnic_init,
.vnic_clean = nfp_flower_vnic_clean,
.repr_init = nfp_flower_repr_netdev_init,
.repr_preclean = nfp_flower_repr_netdev_preclean,
.repr_clean = nfp_flower_repr_netdev_clean,
......
......@@ -20,7 +20,6 @@ struct nfp_fl_pre_lag;
struct net_device;
struct nfp_app;
#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff)
#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \
init_unalloc)
#define NFP_FLOWER_MASK_ENTRY_RS 256
......@@ -130,6 +129,7 @@ struct nfp_fl_lag {
* @reify_wait_queue: wait queue for repr reify response counting
* @mtu_conf: Configuration of repr MTU value
* @nfp_lag: Link aggregation data block
* @indr_block_cb_priv: List of priv data passed to indirect block cbs
*/
struct nfp_flower_priv {
struct nfp_app *app;
......@@ -162,6 +162,7 @@ struct nfp_flower_priv {
wait_queue_head_t reify_wait_queue;
struct nfp_mtu_conf mtu_conf;
struct nfp_fl_lag nfp_lag;
struct list_head indr_block_cb_priv;
};
/**
......@@ -205,7 +206,6 @@ struct nfp_fl_payload {
char *unmasked_data;
char *mask_data;
char *action_data;
bool ingress_offload;
};
extern const struct rhashtable_params nfp_flower_table_params;
......@@ -222,7 +222,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app);
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
int nfp_flower_compile_flow_match(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
......@@ -240,7 +241,7 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
struct net_device *netdev, __be32 host_ctx);
struct net_device *netdev);
struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
......@@ -256,8 +257,6 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
void *cb_priv);
void nfp_flower_lag_init(struct nfp_fl_lag *lag);
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
......@@ -270,5 +269,8 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct nfp_fl_pre_lag *pre_act);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
struct net_device *master);
int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
struct net_device *netdev,
unsigned long event);
#endif
......@@ -52,10 +52,13 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
return 0;
}
if (tun_type)
if (tun_type) {
frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
else
} else {
if (!cmsg_port)
return -EOPNOTSUPP;
frame->in_port = cpu_to_be32(cmsg_port);
}
return 0;
}
......@@ -289,17 +292,21 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
}
}
int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
int nfp_flower_compile_flow_match(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type)
{
struct nfp_repr *netdev_repr;
u32 cmsg_port = 0;
int err;
u8 *ext;
u8 *msk;
if (nfp_netdev_is_nfp_repr(netdev))
cmsg_port = nfp_repr_get_port_id(netdev);
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
......@@ -327,15 +334,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
nfp_repr_get_port_id(netdev),
false, tun_type);
cmsg_port, false, tun_type);
if (err)
return err;
/* Populate Mask Port Data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
nfp_repr_get_port_id(netdev),
true, tun_type);
cmsg_port, true, tun_type);
if (err)
return err;
......@@ -399,16 +404,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
msk += sizeof(struct nfp_flower_ipv4_udp_tun);
/* Configure tunnel end point MAC. */
if (nfp_netdev_is_nfp_repr(netdev)) {
netdev_repr = netdev_priv(netdev);
nfp_tunnel_write_macs(netdev_repr->app);
/* Store the tunnel destination in the rule data.
* This must be present and be an exact match.
*/
nfp_flow->nfp_tun_ipv4_addr = tun_dst;
nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
}
nfp_tunnel_write_macs(app);
/* Store the tunnel destination in the rule data.
* This must be present and be an exact match.
*/
nfp_flow->nfp_tun_ipv4_addr = tun_dst;
nfp_tunnel_add_ipv4_off(app, tun_dst);
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
err = nfp_flower_compile_geneve_opt(ext, flow, false);
......
......@@ -21,7 +21,6 @@ struct nfp_mask_id_table {
struct nfp_fl_flow_table_cmp_arg {
struct net_device *netdev;
unsigned long cookie;
__be32 host_ctx;
};
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
......@@ -76,14 +75,13 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
struct net_device *netdev, __be32 host_ctx)
struct net_device *netdev)
{
struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
struct nfp_flower_priv *priv = app->priv;
flower_cmp_arg.netdev = netdev;
flower_cmp_arg.cookie = tc_flower_cookie;
flower_cmp_arg.host_ctx = host_ctx;
return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
nfp_flower_table_params);
......@@ -287,6 +285,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
nfp_flow->ingress_dev = netdev;
new_mask_id = 0;
if (!nfp_check_mask_add(app, nfp_flow->mask_data,
......@@ -306,8 +305,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
priv->stats[stats_cxt].bytes = 0;
priv->stats[stats_cxt].used = jiffies;
check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
NFP_FL_STATS_CTX_DONT_CARE);
check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (check_entry) {
if (nfp_release_stats_entry(app, stats_cxt))
return -EINVAL;
......@@ -352,9 +350,7 @@ static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
const struct nfp_fl_payload *flow_entry = obj;
if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) &&
(cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
flow_entry->meta.host_ctx_id == cmp_arg->host_ctx))
if (flow_entry->ingress_dev == cmp_arg->netdev)
return flow_entry->tc_flower_cookie != cmp_arg->cookie;
return 1;
......
......@@ -4,7 +4,6 @@
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
#include <net/vxlan.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>
......@@ -182,20 +181,6 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
}
}
static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
{
if (!netdev->rtnl_link_ops)
return false;
if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
return true;
if (netif_is_vxlan(netdev))
return true;
if (netif_is_geneve(netdev))
return true;
return false;
}
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
gfp_t flag)
......@@ -617,7 +602,7 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_repr_get_port_id(netdev);
else if (!nfp_tun_is_netdev_to_offload(netdev))
else if (!nfp_fl_is_netdev_to_offload(netdev))
return;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
......@@ -660,7 +645,7 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
{
if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
/* If non-nfp netdev then free its offload index. */
if (nfp_tun_is_netdev_to_offload(netdev))
if (nfp_fl_is_netdev_to_offload(netdev))
nfp_tun_del_mac_idx(app, netdev->ifindex);
} else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
event == NETDEV_REGISTER) {
......
......@@ -81,6 +81,14 @@ void __tcf_block_cb_unregister(struct tcf_block *block,
struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident);
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
tc_indr_block_bind_cb_t *cb, void *cb_ident);
void __tc_indr_block_cb_unregister(struct net_device *dev,
tc_indr_block_bind_cb_t *cb, void *cb_ident);
void tc_indr_block_cb_unregister(struct net_device *dev,
tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode);
......@@ -183,6 +191,32 @@ void tcf_block_cb_unregister(struct tcf_block *block,
{
}
/* No-op stub variants of the indirect block cb (un)registration API.
 * NOTE(review): presumably these live in the branch of the header built
 * when TC classification support is compiled out — confirm against the
 * surrounding #ifdef, which is not visible in this hunk.  Registration
 * reports success so callers need no special-casing.
 */
static inline
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}
static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode)
{
......
......@@ -24,6 +24,9 @@ struct bpf_flow_keys;
typedef int tc_setup_cb_t(enum tc_setup_type type,
void *type_data, void *cb_priv);
typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
enum tc_setup_type type, void *type_data);
struct qdisc_rate_table {
struct tc_ratespec rate;
u32 data[256];
......
......@@ -25,6 +25,7 @@
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
......@@ -365,6 +366,245 @@ static void tcf_chain_flush(struct tcf_chain *chain)
}
}
static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
{
const struct Qdisc_class_ops *cops;
struct Qdisc *qdisc;
if (!dev_ingress_queue(dev))
return NULL;
qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
if (!qdisc)
return NULL;
cops = qdisc->ops->cl_ops;
if (!cops)
return NULL;
if (!cops->tcf_block)
return NULL;
return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
}
/* Global hash table mapping a net_device to its indirect block state. */
static struct rhastable indr_setup_block_ht;

/* Per-device bookkeeping for indirect block registrations. */
struct tc_indr_block_dev {
	struct rhash_head ht_node;	/* membership in indr_setup_block_ht */
	struct net_device *dev;		/* hash key */
	unsigned int refcnt;		/* number of registered callbacks */
	struct list_head cb_list;	/* list of tc_indr_block_cb entries */
	struct tcf_block *block;	/* device's ingress block, if bound */
};

/* One registered indirect-block callback on a device. */
struct tc_indr_block_cb {
	struct list_head list;		/* link in tc_indr_block_dev::cb_list */
	void *cb_priv;			/* opaque argument handed back to cb */
	tc_indr_block_bind_cb_t *cb;	/* driver-supplied bind callback */
	void *cb_ident;			/* identity cookie used for lookup */
};

/* Keyed by net_device pointer value, not device contents. */
static const struct rhashtable_params tc_indr_setup_block_ht_params = {
	.key_offset = offsetof(struct tc_indr_block_dev, dev),
	.head_offset = offsetof(struct tc_indr_block_dev, ht_node),
	.key_len = sizeof(struct net_device *),
};
/* Look up the indirect block state for @dev; NULL if none registered. */
static struct tc_indr_block_dev *
tc_indr_block_dev_lookup(struct net_device *dev)
{
	return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
				      tc_indr_setup_block_ht_params);
}
/* Get a reference on the indirect block state for @dev, creating and
 * inserting a new entry on first use.  Returns NULL on allocation or
 * hash-insert failure; otherwise the entry with its refcount bumped.
 */
static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
{
	struct tc_indr_block_dev *indr_dev;

	/* Reuse an existing entry for this netdev if one is present. */
	indr_dev = tc_indr_block_dev_lookup(dev);
	if (!indr_dev) {
		indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
		if (!indr_dev)
			return NULL;

		INIT_LIST_HEAD(&indr_dev->cb_list);
		indr_dev->dev = dev;
		/* Snapshot the current ingress block, if the device has one. */
		indr_dev->block = tc_dev_ingress_block(dev);

		if (rhashtable_insert_fast(&indr_setup_block_ht,
					   &indr_dev->ht_node,
					   tc_indr_setup_block_ht_params)) {
			kfree(indr_dev);
			return NULL;
		}
	}

	indr_dev->refcnt++;
	return indr_dev;
}
/* Drop one reference on @indr_dev; on the last put, remove it from the
 * hash table and free it.
 */
static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
{
	indr_dev->refcnt--;
	if (indr_dev->refcnt)
		return;

	rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
			       tc_indr_setup_block_ht_params);
	kfree(indr_dev);
}
/* Find the callback registered on @indr_dev identified by the
 * (cb, cb_ident) pair; NULL when no such registration exists.
 */
static struct tc_indr_block_cb *
tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
			tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *cur;

	list_for_each_entry(cur, &indr_dev->cb_list, list) {
		if (cur->cb == cb && cur->cb_ident == cb_ident)
			return cur;
	}

	return NULL;
}
/* Allocate and link a new callback entry on @indr_dev.  Returns the new
 * entry, ERR_PTR(-EEXIST) when the same (cb, cb_ident) pair is already
 * registered, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct tc_indr_block_cb *
tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
		     tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *indr_block_cb;

	/* Reject duplicate registrations of the same identity. */
	if (tc_indr_block_cb_lookup(indr_dev, cb, cb_ident))
		return ERR_PTR(-EEXIST);

	indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
	if (!indr_block_cb)
		return ERR_PTR(-ENOMEM);

	indr_block_cb->cb_priv = cb_priv;
	indr_block_cb->cb = cb;
	indr_block_cb->cb_ident = cb_ident;
	list_add(&indr_block_cb->list, &indr_dev->cb_list);

	return indr_block_cb;
}
/* Unlink a callback entry from its device list and free it. */
static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
{
	list_del(&indr_block_cb->list);
	kfree(indr_block_cb);
}
/* Deliver a single BIND/UNBIND notification for the device's ingress
 * block to one registered callback.  A device with no block bound has
 * nothing to report, so the call is skipped.
 */
static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
				  struct tc_indr_block_cb *indr_block_cb,
				  enum tc_block_command command)
{
	struct tc_block_offload bo = {};

	if (!indr_dev->block)
		return;

	bo.command = command;
	bo.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo.block = indr_dev->block;

	indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv,
			  TC_SETUP_BLOCK, &bo);
}
/* Register @cb as an indirect block callback on @dev and immediately
 * report any already-bound ingress block to it.  Caller must hold RTNL.
 * Returns 0 on success, -ENOMEM or -EEXIST on failure.
 */
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *indr_block_cb;
	struct tc_indr_block_dev *indr_dev;

	indr_dev = tc_indr_block_dev_get(dev);
	if (!indr_dev)
		return -ENOMEM;

	indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
	if (IS_ERR(indr_block_cb)) {
		/* Roll back the device reference taken above. */
		tc_indr_block_dev_put(indr_dev);
		return PTR_ERR(indr_block_cb);
	}

	/* Let the new callback see an existing block binding right away. */
	tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_BIND);
	return 0;
}
EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);
/* RTNL-acquiring wrapper around __tc_indr_block_cb_register(). */
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	int err;

	rtnl_lock();
	err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(tc_indr_block_cb_register);
/* Unregister the callback identified by (cb, cb_ident) from @dev,
 * sending an UNBIND notification first so the driver can release any
 * block callbacks it installed.  Silently ignores unknown devices or
 * identities.  Caller must hold RTNL.
 */
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	struct tc_indr_block_cb *indr_block_cb;
	struct tc_indr_block_dev *indr_dev;

	indr_dev = tc_indr_block_dev_lookup(dev);
	if (indr_dev) {
		indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
		if (indr_block_cb) {
			/* Send unbind message if required to free any block cbs. */
			tc_indr_block_ing_cmd(indr_dev, indr_block_cb,
					      TC_BLOCK_UNBIND);
			tc_indr_block_cb_del(indr_block_cb);
			tc_indr_block_dev_put(indr_dev);
		}
	}
}
EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister);
/* RTNL-acquiring wrapper around __tc_indr_block_cb_unregister(). */
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	rtnl_lock();
	__tc_indr_block_cb_unregister(dev, cb, cb_ident);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);
static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
struct tcf_block_ext_info *ei,
enum tc_block_command command,
struct netlink_ext_ack *extack)
{
struct tc_indr_block_cb *indr_block_cb;
struct tc_indr_block_dev *indr_dev;
struct tc_block_offload bo = {
.command = command,
.binder_type = ei->binder_type,
.block = block,
.extack = extack,
};
indr_dev = tc_indr_block_dev_lookup(dev);
if (!indr_dev)
return;
indr_dev->block = command == TC_BLOCK_BIND ? block : NULL;
list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
&bo);
}
static bool tcf_block_offload_in_use(struct tcf_block *block)
{
return block->offloadcnt;
......@@ -406,12 +646,17 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
if (err == -EOPNOTSUPP)
goto no_offload_dev_inc;
return err;
if (err)
return err;
tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
return 0;
no_offload_dev_inc:
if (tcf_block_offload_in_use(block))
return -EOPNOTSUPP;
block->nooffloaddevcnt++;
tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
return 0;
}
......@@ -421,6 +666,8 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
struct net_device *dev = q->dev_queue->dev;
int err;
tc_indr_block_call(block, dev, ei, TC_BLOCK_UNBIND, NULL);
if (!dev->netdev_ops->ndo_setup_tc)
goto no_offload_dev_dec;
err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
......@@ -2355,6 +2602,11 @@ static int __init tc_filter_init(void)
if (err)
goto err_register_pernet_subsys;
err = rhashtable_init(&indr_setup_block_ht,
&tc_indr_setup_block_ht_params);
if (err)
goto err_rhash_setup_block_ht;
rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
......@@ -2366,6 +2618,8 @@ static int __init tc_filter_init(void)
return 0;
err_rhash_setup_block_ht:
unregister_pernet_subsys(&tcf_net_ops);
err_register_pernet_subsys:
destroy_workqueue(tc_filter_wq);
return err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment