Commit b0d11b42 authored by Jiri Pirko, committed by David S. Miller

team: avoid possible underflow of count_pending value for notify_peers and mcast_rejoin

This patch fixes a race condition that may cause count_pending to be set
to -1, which results in an unwanted big burst of ARP messages (in the case
of "notify peers").

Consider the following scenario:

count_pending == 2
   CPU0                                        CPU1
                                               team_notify_peers_work
                                                 atomic_dec_and_test (dec count_pending to 1)
                                                 schedule_delayed_work
 team_notify_peers
   atomic_add (adding 1 to count_pending)
                                               team_notify_peers_work
                                                 atomic_dec_and_test (dec count_pending to 1)
                                                 schedule_delayed_work
                                               team_notify_peers_work
                                                 atomic_dec_and_test (dec count_pending to 0)
   schedule_delayed_work
                                               team_notify_peers_work
                                                 atomic_dec_and_test (dec count_pending to -1)
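
For illustration only (not part of the patch), the interleaving above can be
replayed in a small userspace program using C11 atomics. The helper name
work_pass_old() is made up; each call models one run of the old work function,
which used atomic_dec_and_test() and rescheduled itself while the counter was
nonzero:

/* Userspace model of the race above (illustrative, not kernel code). */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int count_pending;

/* one pass of the old work function; returns nonzero if it would reschedule */
static int work_pass_old(void)
{
        return atomic_fetch_sub(&count_pending, 1) - 1 != 0;
}

int main(void)
{
        atomic_store(&count_pending, 2);

        work_pass_old();                      /* CPU1: 2 -> 1, reschedules     */
        atomic_fetch_add(&count_pending, 1);  /* CPU0: team_notify_peers, -> 2 */
        work_pass_old();                      /* CPU1: 2 -> 1, reschedules     */
        work_pass_old();                      /* CPU1: 1 -> 0, stops           */
        work_pass_old();                      /* extra pass queued by CPU0's
                                                 schedule_delayed_work: 0 -> -1 */

        printf("count_pending = %d\n", atomic_load(&count_pending));  /* -1 */
        return 0;
}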

Fix this race by using atomic_dec_if_positive(), which prevents
count_pending from going below 0.
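
For reference, a rough userspace sketch of the atomic_dec_if_positive()
semantics relied on here (a simplified model, not the kernel's
implementation): the counter is decremented only when the result stays
non-negative, and the would-be result is returned either way, so a work pass
that finds the counter already at 0 gets a negative value back and can just
drop the RTNL lock and return:

/* Sketch of atomic_dec_if_positive() semantics using C11 atomics
 * (illustrative, not the kernel's implementation).
 */
#include <stdatomic.h>

static int dec_if_positive(atomic_int *v)
{
        int old = atomic_load(v);

        while (old > 0) {
                /* try to replace old with old - 1; on failure, old is
                 * reloaded with the current value and we retry
                 */
                if (atomic_compare_exchange_weak(v, &old, old - 1))
                        break;
        }
        return old - 1; /* negative: counter was already 0, nothing pending */
}

With this, the extra work pass in the scenario above sees a negative val and
bails out instead of pushing count_pending to -1 and kicking off another
round of notifications.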

Fixes: fc423ff0 ("team: add peer notification")
Fixes: 492b200e ("team: add support for sending multicast rejoins")
Signed-off-by: Jiri Pirko <jiri@resnulli.us>
Signed-off-by: Jiri Benc <jbenc@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1ba39804
@@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind)
 static void team_notify_peers_work(struct work_struct *work)
 {
         struct team *team;
+        int val;
 
         team = container_of(work, struct team, notify_peers.dw.work);
 
@@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work)
                 schedule_delayed_work(&team->notify_peers.dw, 0);
                 return;
         }
+        val = atomic_dec_if_positive(&team->notify_peers.count_pending);
+        if (val < 0) {
+                rtnl_unlock();
+                return;
+        }
         call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
         rtnl_unlock();
-        if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+        if (val)
                 schedule_delayed_work(&team->notify_peers.dw,
                                       msecs_to_jiffies(team->notify_peers.interval));
 }
@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team)
 static void team_mcast_rejoin_work(struct work_struct *work)
 {
         struct team *team;
+        int val;
 
         team = container_of(work, struct team, mcast_rejoin.dw.work);
 
@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
                 schedule_delayed_work(&team->mcast_rejoin.dw, 0);
                 return;
         }
+        val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
+        if (val < 0) {
+                rtnl_unlock();
+                return;
+        }
         call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
         rtnl_unlock();
-        if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+        if (val)
                 schedule_delayed_work(&team->mcast_rejoin.dw,
                                       msecs_to_jiffies(team->mcast_rejoin.interval));
 }