Commit 9eabd188 authored by Pablo Neira Ayuso, committed by David S. Miller

mlx5: update indirect block support

Register the indirect block setup callback via flow_indr_dev_register() and
unregister it via flow_indr_dev_unregister().

No need for mlx5e_rep_indr_clean_block_privs() since flow_block_cb_free()
already releases the internal mapping via ->release callback, which in
this case is mlx5e_rep_indr_tc_block_unbind().
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0fdcf78d
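
For context, a minimal sketch of the registration model this patch moves to, under stated assumptions: the foo_* names and struct foo_priv below are hypothetical placeholders, not mlx5 identifiers. The idea is that a driver registers one indirect setup callback at init time with flow_indr_dev_register() and drops it at teardown with flow_indr_dev_unregister(), instead of binding and unbinding blocks from a per-netdev event notifier.

/* Sketch only: all foo_* names and struct foo_priv are hypothetical
 * placeholders, not mlx5 identifiers.
 */
#include <linux/netdevice.h>
#include <net/flow_offload.h>

struct foo_priv;        /* hypothetical driver private data */

/* Classifier callback attached to indirectly bound blocks. */
static int foo_setup_tc_cb(enum tc_setup_type type, void *type_data,
                           void *cb_priv)
{
        /* rule offload elided in this sketch */
        return type == TC_SETUP_CLSFLOWER ? 0 : -EOPNOTSUPP;
}

/* Single entry point for TC offload requests arriving on foreign devices
 * (tunnel devices, VLANs on the uplink, ...). Filtering by device and
 * binding the block happen here, not in a netdev event notifier.
 */
static int foo_indr_setup_cb(struct net_device *netdev, void *cb_priv,
                             enum tc_setup_type type, void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                /* bind/unbind the flow block; elided in this sketch */
                return -EOPNOTSUPP;
        default:
                return -EOPNOTSUPP;
        }
}

static int foo_offload_init(struct foo_priv *priv)
{
        /* Register once at driver init time. */
        return flow_indr_dev_register(foo_indr_setup_cb, priv);
}

static void foo_offload_cleanup(struct foo_priv *priv)
{
        /* Unregister at teardown; blocks bound through foo_setup_tc_cb are
         * flushed, and each flow_block_cb's ->release frees its private data.
         */
        flow_indr_dev_unregister(foo_indr_setup_cb, priv, foo_setup_tc_cb);
}

The hunks below follow this pattern, with mlx5e_rep_indr_setup_cb as the entry point and rpriv as the callback private data.
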
@@ -306,20 +306,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
return NULL;
}
static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
struct net_device *netdev);
void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
{
struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;
list_for_each_entry_safe(cb_priv, temp, head, list) {
mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
kfree(cb_priv);
}
}
static int
mlx5e_rep_indr_offload(struct net_device *netdev,
struct flow_cls_offload *flower,
@@ -423,9 +409,14 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
struct flow_block_offload *f,
flow_setup_cb_t *setup_cb)
{
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
struct mlx5e_rep_indr_block_priv *indr_priv;
struct flow_block_cb *block_cb;
if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
!(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
return -EOPNOTSUPP;
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
@@ -492,76 +483,20 @@ int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
}
}
static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
struct net_device *netdev)
{
int err;
err = __flow_indr_block_cb_register(netdev, rpriv,
mlx5e_rep_indr_setup_cb,
rpriv);
if (err) {
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
netdev_name(netdev), err);
}
return err;
}
static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
struct net_device *netdev)
{
__flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_cb,
rpriv);
}
static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
uplink_priv.netdevice_nb);
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
!(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
return NOTIFY_OK;
switch (event) {
case NETDEV_REGISTER:
mlx5e_rep_indr_register_block(rpriv, netdev);
break;
case NETDEV_UNREGISTER:
mlx5e_rep_indr_unregister_block(rpriv, netdev);
break;
}
return NOTIFY_OK;
}
int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv)
{
struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
int err;
/* init indirect block notifications */
INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
err = register_netdevice_notifier_dev_net(rpriv->netdev,
&uplink_priv->netdevice_nb,
&uplink_priv->netdevice_nn);
return err;
return flow_indr_dev_register(mlx5e_rep_indr_setup_cb, rpriv);
}
void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
{
struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
/* clean indirect TC block notifications */
unregister_netdevice_notifier_dev_net(rpriv->netdev,
&uplink_priv->netdevice_nb,
&uplink_priv->netdevice_nn);
flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
mlx5e_rep_indr_setup_tc_cb);
}
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
@@ -33,7 +33,6 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data);
void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv);
bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
struct sk_buff *skb,
@@ -65,9 +64,6 @@ static inline int
mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data) { return -EOPNOTSUPP; }
static inline void
mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv) {}
struct mlx5e_tc_update_priv;
static inline bool
mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
@@ -1018,7 +1018,6 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
{
mlx5e_rep_tc_netdevice_event_unregister(rpriv);
mlx5e_rep_indr_clean_block_privs(rpriv);
mlx5e_rep_bond_cleanup(rpriv);
mlx5e_rep_tc_cleanup(rpriv);
}
@@ -69,13 +69,8 @@ struct mlx5_rep_uplink_priv {
* tc_indr_block_cb_priv_list is used to lookup indirect callback
* private data
*
* netdevice_nb is the netdev events notifier - used to register
* tunnel devices for block events
*
*/
struct list_head tc_indr_block_priv_list;
struct notifier_block netdevice_nb;
struct netdev_net_notifier netdevice_nn;
struct mlx5_tun_entropy tun_entropy;
......