Commit 78c9df81 authored by David S. Miller

Merge branch 'mlxsw-Preparation-for-RTNL-removal'

Ido Schimmel says:

====================
mlxsw: Preparation for RTNL removal

The driver currently acquires RTNL in its route insertion path, which
contributes to very large control plane latencies. This patch set
prepares mlxsw for RTNL removal from its route insertion path in a
follow-up patch set.

Patches #1-#2 protect shared resources - the KVDL and the counter pool -
with dedicated locks. All allocations of these resources are currently
performed under RTNL, so until now no other locks were required.
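
As an illustration of the pattern (a minimal sketch with made-up names, not
the driver's exact code), protecting such a bitmap-based allocator looks
like this:

	#include <linux/bitops.h>
	#include <linux/errno.h>
	#include <linux/spinlock.h>

	struct pool {
		spinlock_t lock;	/* Protects 'usage' */
		unsigned long *usage;	/* Usage bitmap */
		unsigned int size;
	};

	static int pool_alloc(struct pool *p, unsigned int *p_index)
	{
		unsigned int index;

		spin_lock(&p->lock);
		index = find_first_zero_bit(p->usage, p->size);
		if (index == p->size) {
			spin_unlock(&p->lock);
			return -ENOBUFS;
		}
		__set_bit(index, p->usage);
		spin_unlock(&p->lock);

		*p_index = index;
		return 0;
	}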

Patches #3-#7 ensure that mirroring sessions are only updated when there
are active mirroring sessions. This allows us to avoid taking RTNL when it
is not needed, since updating the mirroring sessions must still be
performed under RTNL for the time being.
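
Concretely, after these patches the respin entry point can bail out without
touching RTNL when nothing is mirrored; this is the shape of the code in
spectrum_span.c below:

	void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
	{
		/* No active mirroring sessions - nothing to update, so do
		 * not schedule the RTNL-taking work item at all.
		 */
		if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
			return;

		mlxsw_core_schedule_work(&mlxsw_sp->span->work);
	}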

Patches #8-#10 replace the use of APIs that assume that RTNL is taken
with their RCU counterparts. Specifically, patches #8 and #9 replace
__in_dev_get_rtnl() with __in_dev_get_rcu() inside an RCU read-side critical
section. Patch #10 replaces __dev_get_by_index() with
dev_get_by_index_rcu().
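
Both conversions follow the standard RCU pattern - the looked-up pointer may
only be dereferenced inside the read-side critical section:

	/* Before: only safe because the caller holds RTNL */
	in_dev = __in_dev_get_rtnl(dev);

	/* After: protected by an RCU read-side critical section */
	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	/* ... in_dev must not be used after the unlock ... */
	rcu_read_unlock();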

Patches #11-#15 make small adjustments to the code so that a dedicated
router lock can later be introduced in place of RTNL.
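
For context, the follow-up set is expected to hang that lock off the router
structure, roughly as follows (a sketch of the direction only, not code from
this set):

	struct mlxsw_sp_router {
		/* ... existing members ... */
		struct mutex lock; /* Protects shared router resources */
	};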
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 807ea870 9ef87b24
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -6316,7 +6316,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
 			return -EINVAL;
 		}
 		if (netif_is_macvlan(upper_dev) &&
-		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, lower_dev)) {
+		    !mlxsw_sp_rif_exists(mlxsw_sp, lower_dev)) {
 			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
 			return -EOPNOTSUPP;
 		}
@@ -6472,7 +6472,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
 			return -EINVAL;
 		}
 		if (netif_is_macvlan(upper_dev) &&
-		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
 			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
 			return -EOPNOTSUPP;
 		}
@@ -6549,7 +6549,7 @@ static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
 		if (!info->linking)
 			break;
 		if (netif_is_macvlan(upper_dev) &&
-		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+		    !mlxsw_sp_rif_exists(mlxsw_sp, vlan_dev)) {
 			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
 			return -EOPNOTSUPP;
 		}
@@ -6609,7 +6609,7 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
 		if (!info->linking)
 			break;
 		if (netif_is_macvlan(upper_dev) &&
-		    !mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev)) {
+		    !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
 			NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
 			return -EOPNOTSUPP;
 		}
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -168,12 +168,8 @@ struct mlxsw_sp {
 	struct notifier_block netdevice_nb;
 	struct mlxsw_sp_ptp_clock *clock;
 	struct mlxsw_sp_ptp_state *ptp_state;
 	struct mlxsw_sp_counter_pool *counter_pool;
-	struct {
-		struct mlxsw_sp_span_entry *entries;
-		int entries_count;
-	} span;
+	struct mlxsw_sp_span *span;
 	const struct mlxsw_fw_rev *req_rev;
 	const char *fw_filename;
 	const struct mlxsw_sp_kvdl_ops *kvdl_ops;
@@ -567,10 +563,10 @@ void
 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
 				 struct net_device *dev);
-struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
 			 const struct net_device *dev);
+u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev);
 u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
-struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif);
 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
 				      enum mlxsw_sp_l3proto ul_proto,
 				      const union mlxsw_sp_l3addr *ul_sip,
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -3,6 +3,7 @@
 #include <linux/kernel.h>
 #include <linux/bitops.h>
+#include <linux/spinlock.h>
 #include "spectrum_cnt.h"
@@ -18,6 +19,7 @@ struct mlxsw_sp_counter_sub_pool {
 struct mlxsw_sp_counter_pool {
 	unsigned int pool_size;
 	unsigned long *usage; /* Usage bitmap */
+	spinlock_t counter_pool_lock; /* Protects counter pool allocations */
 	struct mlxsw_sp_counter_sub_pool *sub_pools;
 };
@@ -87,6 +89,7 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 	if (!pool)
 		return -ENOMEM;
+	spin_lock_init(&pool->counter_pool_lock);
 	pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
 	map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
@@ -139,25 +142,35 @@ int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
 	struct mlxsw_sp_counter_sub_pool *sub_pool;
 	unsigned int entry_index;
 	unsigned int stop_index;
-	int i;
+	int i, err;
 	sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
 	stop_index = sub_pool->base_index + sub_pool->size;
 	entry_index = sub_pool->base_index;
+	spin_lock(&pool->counter_pool_lock);
 	entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
-	if (entry_index == stop_index)
-		return -ENOBUFS;
+	if (entry_index == stop_index) {
+		err = -ENOBUFS;
+		goto err_alloc;
+	}
 	/* The sub-pools can contain non-integer number of entries
 	 * so we must check for overflow
 	 */
-	if (entry_index + sub_pool->entry_size > stop_index)
-		return -ENOBUFS;
+	if (entry_index + sub_pool->entry_size > stop_index) {
+		err = -ENOBUFS;
+		goto err_alloc;
+	}
 	for (i = 0; i < sub_pool->entry_size; i++)
 		__set_bit(entry_index + i, pool->usage);
+	spin_unlock(&pool->counter_pool_lock);
 	*p_counter_index = entry_index;
 	return 0;
+err_alloc:
+	spin_unlock(&pool->counter_pool_lock);
+	return err;
 }
 void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
@@ -171,6 +184,8 @@ void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
 	if (WARN_ON(counter_index >= pool->pool_size))
 		return;
 	sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+	spin_lock(&pool->counter_pool_lock);
 	for (i = 0; i < sub_pool->entry_size; i++)
 		__clear_bit(counter_index + i, pool->usage);
+	spin_unlock(&pool->counter_pool_lock);
 }
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -2,12 +2,14 @@
 /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
 #include <linux/kernel.h>
+#include <linux/mutex.h>
 #include <linux/slab.h>
 #include "spectrum.h"
 struct mlxsw_sp_kvdl {
 	const struct mlxsw_sp_kvdl_ops *kvdl_ops;
+	struct mutex kvdl_lock; /* Protects kvdl allocations */
 	unsigned long priv[];
 	/* priv has to be always the last item */
 };
@@ -22,6 +24,7 @@ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
 			    GFP_KERNEL);
 	if (!kvdl)
 		return -ENOMEM;
+	mutex_init(&kvdl->kvdl_lock);
 	kvdl->kvdl_ops = kvdl_ops;
 	mlxsw_sp->kvdl = kvdl;
@@ -31,6 +34,7 @@ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
 	return 0;
 err_init:
+	mutex_destroy(&kvdl->kvdl_lock);
 	kfree(kvdl);
 	return err;
 }
@@ -40,6 +44,7 @@ void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
 	struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
 	kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv);
+	mutex_destroy(&kvdl->kvdl_lock);
 	kfree(kvdl);
 }
@@ -48,9 +53,14 @@ int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp,
 			unsigned int entry_count, u32 *p_entry_index)
 {
 	struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
+	int err;
-	return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
+	mutex_lock(&kvdl->kvdl_lock);
+	err = kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv, type,
				     entry_count, p_entry_index);
+	mutex_unlock(&kvdl->kvdl_lock);
+	return err;
 }
 void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
@@ -59,8 +69,10 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp,
 {
 	struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
+	mutex_lock(&kvdl->kvdl_lock);
 	kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, type,
			     entry_count, entry_index);
+	mutex_unlock(&kvdl->kvdl_lock);
 }
 int mlxsw_sp_kvdl_alloc_count_query(struct mlxsw_sp *mlxsw_sp,
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -744,6 +744,8 @@ static int mlxsw_sp_nve_tunnel_init(struct mlxsw_sp *mlxsw_sp,
 	if (nve->num_nve_tunnels++ != 0)
 		return 0;
+	nve->config = *config;
 	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
				  &nve->tunnel_index);
 	if (err)
@@ -760,6 +762,7 @@ static int mlxsw_sp_nve_tunnel_init(struct mlxsw_sp *mlxsw_sp,
 	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   nve->tunnel_index);
 err_kvdl_alloc:
+	memset(&nve->config, 0, sizeof(nve->config));
 	nve->num_nve_tunnels--;
 	return err;
 }
@@ -840,8 +843,6 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
 		goto err_fid_vni_set;
 	}
-	nve->config = config;
 	err = ops->fdb_replay(params->dev, params->vni, extack);
 	if (err)
 		goto err_fdb_replay;
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -145,6 +145,9 @@ struct mlxsw_sp_rif_ops {
 	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
 };
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+			 const struct net_device *dev);
 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
@@ -988,17 +991,23 @@ __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
 	struct ip_tunnel *tun = netdev_priv(ol_dev);
 	struct net *net = dev_net(ol_dev);
-	return __dev_get_by_index(net, tun->parms.link);
+	return dev_get_by_index_rcu(net, tun->parms.link);
 }
 u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
 {
-	struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+	struct net_device *d;
+	u32 tb_id;
+	rcu_read_lock();
+	d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
 	if (d)
-		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
+		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
 	else
-		return RT_TABLE_MAIN;
+		tb_id = RT_TABLE_MAIN;
+	rcu_read_unlock();
+	return tb_id;
 }
 static struct mlxsw_sp_rif *
@@ -1355,8 +1364,12 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   ipip_list_node);
 	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
-		struct net_device *ipip_ul_dev =
-			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
+		struct net_device *ol_dev = ipip_entry->ol_dev;
+		struct net_device *ipip_ul_dev;
+		rcu_read_lock();
+		ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+		rcu_read_unlock();
 		if (ipip_ul_dev == ul_dev)
 			return ipip_entry;
@@ -1722,9 +1735,12 @@ static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
 	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
-		struct net_device *ipip_ul_dev =
-			__mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
+		struct net_device *ol_dev = ipip_entry->ol_dev;
+		struct net_device *ipip_ul_dev;
+		rcu_read_lock();
+		ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+		rcu_read_unlock();
 		if (ipip_ul_dev == ul_dev)
 			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
 	}
@@ -3711,9 +3727,15 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
 {
-	struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+	struct net_device *ul_dev;
+	bool is_up;
-	return ul_dev ? (ul_dev->flags & IFF_UP) : true;
+	rcu_read_lock();
+	ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+	is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
+	rcu_read_unlock();
+	return is_up;
 }
 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
@@ -3840,10 +3862,14 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
 	if (!dev)
 		return 0;
-	in_dev = __in_dev_get_rtnl(dev);
+	rcu_read_lock();
+	in_dev = __in_dev_get_rcu(dev);
 	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
-	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN)
+	    fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
+		rcu_read_unlock();
 		return 0;
+	}
+	rcu_read_unlock();
 	err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
 	if (err)
@@ -6233,7 +6259,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
 	return NOTIFY_BAD;
 }
-struct mlxsw_sp_rif *
+static struct mlxsw_sp_rif *
 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			  const struct net_device *dev)
 {
@@ -6247,6 +6273,33 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
 	return NULL;
 }
+bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
+			 const struct net_device *dev)
+{
+	return !!mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+}
+u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
+{
+	struct mlxsw_sp_rif *rif;
+	u16 vid = 0;
+	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+	if (!rif)
+		goto out;
+	/* We only return the VID for VLAN RIFs. Otherwise we return an
+	 * invalid value (0).
+	 */
+	if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
+		goto out;
+	vid = mlxsw_sp_fid_8021q_vid(rif->fid);
+out:
+	return vid;
+}
 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
 {
 	char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -6281,7 +6334,8 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
 	case NETDEV_UP:
 		return rif == NULL;
 	case NETDEV_DOWN:
-		idev = __in_dev_get_rtnl(dev);
+		rcu_read_lock();
+		idev = __in_dev_get_rcu(dev);
 		if (idev && idev->ifa_list)
 			addr_list_empty = false;
@@ -6289,6 +6343,7 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
 		if (addr_list_empty && inet6_dev &&
		    !list_empty(&inet6_dev->addr_list))
			addr_list_empty = false;
+		rcu_read_unlock();
 		/* macvlans do not have a RIF, but rather piggy back on the
		 * RIF of their lower device.
@@ -6411,11 +6466,6 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
 	return rif->dev;
 }
-struct mlxsw_sp_fid *mlxsw_sp_rif_fid(const struct mlxsw_sp_rif *rif)
-{
-	return rif->fid;
-}
 static struct mlxsw_sp_rif *
 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		     const struct mlxsw_sp_rif_params *params,
@@ -6631,8 +6681,8 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
 	return err;
 }
-void
-mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+static void
+__mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
@@ -6650,6 +6700,12 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
 	mlxsw_sp_rif_subport_put(rif);
 }
+void
+mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+	__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+}
 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
					      struct net_device *port_dev,
					      unsigned long event, u16 vid,
@@ -6667,7 +6723,7 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
 		return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
						      l3_dev, extack);
 	case NETDEV_DOWN:
-		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+		__mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
 		break;
 	}
@@ -6848,7 +6904,7 @@ static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
 	return err;
 }
-void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *macvlan_dev)
 {
 	struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
@@ -6866,6 +6922,12 @@ void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
			    mlxsw_sp_fid_index(rif->fid), false);
 }
+void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
+			      const struct net_device *macvlan_dev)
+{
+	__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+}
 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *macvlan_dev,
					    unsigned long event,
@@ -6875,7 +6937,7 @@ static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
 	case NETDEV_UP:
 		return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
 	case NETDEV_DOWN:
-		mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
+		__mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
 		break;
 	}
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -3,6 +3,8 @@
 #include <linux/if_bridge.h>
 #include <linux/list.h>
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
 #include <net/arp.h>
 #include <net/gre.h>
 #include <net/lag.h>
@@ -14,38 +16,43 @@
 #include "spectrum_span.h"
 #include "spectrum_switchdev.h"
+struct mlxsw_sp_span {
+	struct work_struct work;
+	struct mlxsw_sp *mlxsw_sp;
+	atomic_t active_entries_count;
+	int entries_count;
+	struct mlxsw_sp_span_entry entries[0];
+};
+static void mlxsw_sp_span_respin_work(struct work_struct *work);
 static u64 mlxsw_sp_span_occ_get(void *priv)
 {
 	const struct mlxsw_sp *mlxsw_sp = priv;
-	u64 occ = 0;
-	int i;
-	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-		if (mlxsw_sp->span.entries[i].ref_count)
-			occ++;
-	}
-	return occ;
+	return atomic_read(&mlxsw_sp->span->active_entries_count);
 }
 int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
 {
 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
-	int i;
+	struct mlxsw_sp_span *span;
+	int i, entries_count;
 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
		return -EIO;
-	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
-							  MAX_SPAN);
-	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
-					 sizeof(struct mlxsw_sp_span_entry),
-					 GFP_KERNEL);
-	if (!mlxsw_sp->span.entries)
+	entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
+	span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
+	if (!span)
		return -ENOMEM;
+	span->entries_count = entries_count;
+	atomic_set(&span->active_entries_count, 0);
+	span->mlxsw_sp = mlxsw_sp;
+	mlxsw_sp->span = span;
-	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
 		INIT_LIST_HEAD(&curr->bound_ports_list);
 		curr->id = i;
@@ -53,6 +60,7 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
 	devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
					   mlxsw_sp_span_occ_get, mlxsw_sp);
+	INIT_WORK(&span->work, mlxsw_sp_span_respin_work);
 	return 0;
 }
@@ -62,14 +70,15 @@ void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
 	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
 	int i;
+	cancel_work_sync(&mlxsw_sp->span->work);
 	devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
-	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
 		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
 	}
-	kfree(mlxsw_sp->span.entries);
+	kfree(mlxsw_sp->span);
 }
static int
......@@ -645,15 +654,16 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
int i;
/* find a free entry to use */
for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
if (!mlxsw_sp->span.entries[i].ref_count) {
span_entry = &mlxsw_sp->span.entries[i];
for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
if (!mlxsw_sp->span->entries[i].ref_count) {
span_entry = &mlxsw_sp->span->entries[i];
break;
}
}
if (!span_entry)
return NULL;
atomic_inc(&mlxsw_sp->span->active_entries_count);
span_entry->ops = ops;
span_entry->ref_count = 1;
span_entry->to_dev = to_dev;
@@ -662,9 +672,11 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
 	return span_entry;
 }
-static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
+static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+					struct mlxsw_sp_span_entry *span_entry)
 {
 	mlxsw_sp_span_entry_deconfigure(span_entry);
+	atomic_dec(&mlxsw_sp->span->active_entries_count);
 }
 struct mlxsw_sp_span_entry *
@@ -673,8 +685,8 @@ mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
 {
 	int i;
-	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
 		if (curr->ref_count && curr->to_dev == to_dev)
			return curr;
@@ -694,8 +706,8 @@ mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
 {
 	int i;
-	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
 		if (curr->ref_count && curr->id == span_id)
			return curr;
@@ -726,7 +738,7 @@ static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
 {
 	WARN_ON(!span_entry->ref_count);
 	if (--span_entry->ref_count == 0)
-		mlxsw_sp_span_entry_destroy(span_entry);
+		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
 	return 0;
 }
@@ -736,8 +748,8 @@ static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
 	struct mlxsw_sp_span_inspected_port *p;
 	int i;
-	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
 		list_for_each_entry(p, &curr->bound_ports_list, list)
			if (p->local_port == port->local_port &&
@@ -842,9 +854,9 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
	 * so if a binding is requested, check for conflicts.
	 */
 	if (bind)
-		for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+		for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
			struct mlxsw_sp_span_entry *curr =
-				&mlxsw_sp->span.entries[i];
+				&mlxsw_sp->span->entries[i];
			if (mlxsw_sp_span_entry_bound_port_find(curr, type,
								port, bind))
@@ -988,14 +1000,18 @@ void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
 	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
 }
-void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_span_respin_work(struct work_struct *work)
 {
-	int i;
-	int err;
+	struct mlxsw_sp_span *span;
+	struct mlxsw_sp *mlxsw_sp;
+	int i, err;
-	ASSERT_RTNL();
-	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
-		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+	span = container_of(work, struct mlxsw_sp_span, work);
+	mlxsw_sp = span->mlxsw_sp;
+	rtnl_lock();
+	for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
 		struct mlxsw_sp_span_parms sparms = {NULL};
 		if (!curr->ref_count)
@@ -1010,4 +1026,12 @@ void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
			mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
		}
 	}
+	rtnl_unlock();
 }
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+{
+	if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
+		return;
+	mlxsw_core_schedule_work(&mlxsw_sp->span->work);
+}
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1173,16 +1173,12 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *br_dev,
				 const struct switchdev_obj_port_vlan *vlan)
 {
-	struct mlxsw_sp_rif *rif;
-	struct mlxsw_sp_fid *fid;
 	u16 pvid;
 	u16 vid;
-	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
-	if (!rif)
+	pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
+	if (!pvid)
		return 0;
-	fid = mlxsw_sp_rif_fid(rif);
-	pvid = mlxsw_sp_fid_8021q_vid(fid);
 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
@@ -1778,36 +1774,6 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
 	}
 }
-struct mlxsw_sp_span_respin_work {
-	struct work_struct work;
-	struct mlxsw_sp *mlxsw_sp;
-};
-static void mlxsw_sp_span_respin_work(struct work_struct *work)
-{
-	struct mlxsw_sp_span_respin_work *respin_work =
-		container_of(work, struct mlxsw_sp_span_respin_work, work);
-	rtnl_lock();
-	mlxsw_sp_span_respin(respin_work->mlxsw_sp);
-	rtnl_unlock();
-	kfree(respin_work);
-}
-static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
-{
-	struct mlxsw_sp_span_respin_work *respin_work;
-	respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
-	if (!respin_work)
-		return;
-	INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
-	respin_work->mlxsw_sp = mlxsw_sp;
-	mlxsw_core_schedule_work(&respin_work->work);
-}
 static int mlxsw_sp_port_obj_add(struct net_device *dev,
				  const struct switchdev_obj *obj,
				  struct switchdev_trans *trans,
@@ -1829,7 +1795,7 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
		 * call for later, so that the respin logic sees the
		 * updated bridge state.
		 */
-		mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
+		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
 		}
 		break;
 	case SWITCHDEV_OBJ_ID_PORT_MDB:
@@ -1982,7 +1948,7 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
		break;
 	}
-	mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
+	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
 	return err;
 }