Commit 61dc3461 authored by Jiri Pirko, committed by David S. Miller

team: convert overall spinlock to mutex

There is no need to use a spinlock for this purpose, so convert it to a mutex and
avoid the current "scheduling while atomic" problems in the netlink code.
Signed-off-by: Jiri Pirko <jpirko@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8c0713a5
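
Why a mutex: team->lock is held across operations that can sleep, for example dev_set_mtu() on every port in team_change_mtu() and the whole genetlink request between team_nl_team_get() and team_nl_team_put(). Sleeping with a spinlock held triggers the "BUG: scheduling while atomic" splat the commit message refers to, while a mutex allows its holder to sleep. The sketch below illustrates the pattern with hypothetical demo_* names; it is not code from the driver.

/*
 * Minimal sketch (hypothetical demo_* names): the overall lock is held
 * across calls that may sleep, so it has to be a mutex.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>

struct demo_port {
	struct net_device *dev;
	struct list_head list;
};

struct demo_team {
	struct mutex lock;		/* overall locking, e.g. port list writes */
	struct list_head port_list;
};

static int demo_change_mtu(struct demo_team *team, int new_mtu)
{
	struct demo_port *port;
	int err = 0;

	mutex_lock(&team->lock);	/* may sleep - process context only */
	list_for_each_entry(port, &team->port_list, list) {
		/* dev_set_mtu() may sleep; doing this under a spinlock
		 * would be scheduling while atomic
		 */
		err = dev_set_mtu(port->dev, new_mtu);
		if (err)
			break;
	}
	mutex_unlock(&team->lock);
	return err;
}

As in team_init() in the diff, the mutex has to be set up with mutex_init() before its first use, and mutex_lock()/mutex_unlock() may only be called from process context.
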
@@ -443,9 +443,9 @@ static void __team_compute_features(struct team *team)
 
 static void team_compute_features(struct team *team)
 {
-	spin_lock(&team->lock);
+	mutex_lock(&team->lock);
 	__team_compute_features(team);
-	spin_unlock(&team->lock);
+	mutex_unlock(&team->lock);
 }
 
 static int team_port_enter(struct team *team, struct team_port *port)
@@ -647,7 +647,7 @@ static int team_init(struct net_device *dev)
 	int i;
 
 	team->dev = dev;
-	spin_lock_init(&team->lock);
+	mutex_init(&team->lock);
 
 	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
 	if (!team->pcpu_stats)
@@ -672,13 +672,13 @@ static void team_uninit(struct net_device *dev)
 	struct team_port *port;
 	struct team_port *tmp;
 
-	spin_lock(&team->lock);
+	mutex_lock(&team->lock);
 	list_for_each_entry_safe(port, tmp, &team->port_list, list)
 		team_port_del(team, port->dev);
 
 	__team_change_mode(team, NULL); /* cleanup */
 	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
-	spin_unlock(&team->lock);
+	mutex_unlock(&team->lock);
 }
 
 static void team_destructor(struct net_device *dev)
@@ -784,7 +784,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
 	 * Alhough this is reader, it's guarded by team lock. It's not possible
	 * to traverse list in reverse under rcu_read_lock
	 */
-	spin_lock(&team->lock);
+	mutex_lock(&team->lock);
 	list_for_each_entry(port, &team->port_list, list) {
 		err = dev_set_mtu(port->dev, new_mtu);
 		if (err) {
@@ -793,7 +793,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
 			goto unwind;
 		}
 	}
-	spin_unlock(&team->lock);
+	mutex_unlock(&team->lock);
 
 	dev->mtu = new_mtu;
 
@@ -802,7 +802,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
 unwind:
 	list_for_each_entry_continue_reverse(port, &team->port_list, list)
 		dev_set_mtu(port->dev, dev->mtu);
-	spin_unlock(&team->lock);
+	mutex_unlock(&team->lock);
 
 	return err;
 }
@@ -880,9 +880,9 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
 	struct team *team = netdev_priv(dev);
 	int err;
 
-	spin_lock(&team->lock);
+	mutex_lock(&team->lock);
 	err = team_port_add(team, port_dev);
-	spin_unlock(&team->lock);
+	mutex_unlock(&team->lock);
 	return err;
 }
 
@@ -891,9 +891,9 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
 	struct team *team = netdev_priv(dev);
 	int err;
 
-	spin_lock(&team->lock);
+	mutex_lock(&team->lock);
 	err = team_port_del(team, port_dev);
-	spin_unlock(&team->lock);
+	mutex_unlock(&team->lock);
 	return err;
 }
 
@@ -1064,13 +1064,13 @@ static struct team *team_nl_team_get(struct genl_info *info)
 	}
 
 	team = netdev_priv(dev);
-	spin_lock(&team->lock);
+	mutex_lock(&team->lock);
 	return team;
 }
 
 static void team_nl_team_put(struct team *team)
 {
-	spin_unlock(&team->lock);
+	mutex_unlock(&team->lock);
 	dev_put(team->dev);
 }
 
@@ -1486,9 +1486,9 @@ static void team_port_change_check(struct team_port *port, bool linkup)
 {
 	struct team *team = port->team;
 
-	spin_lock(&team->lock);
+	mutex_lock(&team->lock);
 	__team_port_change_check(port, linkup);
-	spin_unlock(&team->lock);
+	mutex_unlock(&team->lock);
 }
 
 /************************************
@@ -92,7 +92,7 @@ struct team {
 	struct net_device *dev; /* associated netdevice */
 	struct team_pcpu_stats __percpu *pcpu_stats;
 
-	spinlock_t lock; /* used for overall locking, e.g. port lists write */
+	struct mutex lock; /* used for overall locking, e.g. port lists write */
 
 	/*
 	 * port lists with port count
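
The netlink hunk above is where holding the lock across sleeping code is most visible: team_nl_team_get() looks up the device and returns with team->lock held, and team_nl_team_put() releases it, so the lock is held for the entire genetlink request. A rough sketch of how such a handler uses the pair inside the driver, with a hypothetical command handler name:

/*
 * Hypothetical genetlink doit callback (team_nl_cmd_demo is not a function
 * from the driver) showing how the get/put pair from the diff is used.
 */
static int team_nl_cmd_demo(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;

	team = team_nl_team_get(info);	/* returns with team->lock (now a mutex) held */
	if (!team)
		return -EINVAL;

	/*
	 * Work done here may sleep (GFP_KERNEL allocations, sending the
	 * reply), which is why team->lock cannot stay a spinlock.
	 */

	team_nl_team_put(team);		/* drops the mutex and the netdev reference */
	return 0;
}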