Commit 7c47cedf authored by Stephen Hemminger's avatar Stephen Hemminger

vxlan: move IGMP join/leave to work queue

Do join/leave from work queue to avoid lock inversion problems
between normal socket and RTNL. The code comes out cleaner
as well.

Uses Cong Wang's suggestion to turn refcnt into a real atomic,
since we now need to handle the case where the last use of the
socket is the IGMP worker.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
parent 758c57d1
...@@ -85,7 +85,7 @@ struct vxlan_sock { ...@@ -85,7 +85,7 @@ struct vxlan_sock {
struct hlist_node hlist; struct hlist_node hlist;
struct rcu_head rcu; struct rcu_head rcu;
struct work_struct del_work; struct work_struct del_work;
unsigned int refcnt; atomic_t refcnt;
struct socket *sock; struct socket *sock;
struct hlist_head vni_list[VNI_HASH_SIZE]; struct hlist_head vni_list[VNI_HASH_SIZE];
}; };
...@@ -131,6 +131,7 @@ struct vxlan_dev { ...@@ -131,6 +131,7 @@ struct vxlan_dev {
__u8 ttl; __u8 ttl;
u32 flags; /* VXLAN_F_* below */ u32 flags; /* VXLAN_F_* below */
struct work_struct igmp_work;
unsigned long age_interval; unsigned long age_interval;
struct timer_list age_timer; struct timer_list age_timer;
spinlock_t hash_lock; spinlock_t hash_lock;
...@@ -648,76 +649,58 @@ static bool vxlan_snoop(struct net_device *dev, ...@@ -648,76 +649,58 @@ static bool vxlan_snoop(struct net_device *dev,
/* See if multicast group is already in use by other ID */ /* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn, static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
const struct vxlan_dev *this)
{ {
struct vxlan_dev *vxlan; struct vxlan_dev *vxlan;
list_for_each_entry(vxlan, &vn->vxlan_list, next) { list_for_each_entry(vxlan, &vn->vxlan_list, next) {
if (vxlan == this)
continue;
if (!netif_running(vxlan->dev)) if (!netif_running(vxlan->dev))
continue; continue;
if (vxlan->default_dst.remote_ip == this->default_dst.remote_ip) if (vxlan->default_dst.remote_ip == remote_ip)
return true; return true;
} }
return false; return false;
} }
/* kernel equivalent to IP_ADD_MEMBERSHIP */ static void vxlan_sock_hold(struct vxlan_sock *vs)
static int vxlan_join_group(struct net_device *dev)
{ {
struct vxlan_dev *vxlan = netdev_priv(dev); atomic_inc(&vs->refcnt);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); }
struct sock *sk = vxlan->vn_sock->sock->sk;
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
.imr_ifindex = vxlan->default_dst.remote_ifindex,
};
int err;
/* Already a member of group */
if (vxlan_group_used(vn, vxlan))
return 0;
/* Need to drop RTNL to call multicast join */ static void vxlan_sock_release(struct vxlan_sock *vs)
rtnl_unlock(); {
lock_sock(sk); if (!atomic_dec_and_test(&vs->refcnt))
err = ip_mc_join_group(sk, &mreq); return;
release_sock(sk);
rtnl_lock();
return err; hlist_del_rcu(&vs->hlist);
queue_work(vxlan_wq, &vs->del_work);
} }
/* Callback to update multicast group membership.
/* kernel equivalent to IP_DROP_MEMBERSHIP */ * Scheduled when vxlan goes up/down.
static int vxlan_leave_group(struct net_device *dev) */
static void vxlan_igmp_work(struct work_struct *work)
{ {
struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_work);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
int err = 0; struct vxlan_sock *vs = vxlan->vn_sock;
struct sock *sk = vxlan->vn_sock->sock->sk; struct sock *sk = vs->sock->sk;
struct ip_mreqn mreq = { struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = vxlan->default_dst.remote_ip, .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
.imr_ifindex = vxlan->default_dst.remote_ifindex, .imr_ifindex = vxlan->default_dst.remote_ifindex,
}; };
/* Only leave group when last vxlan is done. */
if (vxlan_group_used(vn, vxlan))
return 0;
/* Need to drop RTNL to call multicast leave */
rtnl_unlock();
lock_sock(sk); lock_sock(sk);
err = ip_mc_leave_group(sk, &mreq); if (vxlan_group_used(vn, vxlan->default_dst.remote_ip))
ip_mc_join_group(sk, &mreq);
else
ip_mc_leave_group(sk, &mreq);
release_sock(sk); release_sock(sk);
rtnl_lock();
return err; vxlan_sock_release(vs);
dev_put(vxlan->dev);
} }
/* Callback from net/ipv4/udp.c to receive packets */ /* Callback from net/ipv4/udp.c to receive packets */
...@@ -1249,12 +1232,11 @@ static int vxlan_init(struct net_device *dev) ...@@ -1249,12 +1232,11 @@ static int vxlan_init(struct net_device *dev)
static int vxlan_open(struct net_device *dev) static int vxlan_open(struct net_device *dev)
{ {
struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_dev *vxlan = netdev_priv(dev);
int err;
if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) { if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
err = vxlan_join_group(dev); vxlan_sock_hold(vxlan->vn_sock);
if (err) dev_hold(dev);
return err; queue_work(vxlan_wq, &vxlan->igmp_work);
} }
if (vxlan->age_interval) if (vxlan->age_interval)
...@@ -1285,8 +1267,11 @@ static int vxlan_stop(struct net_device *dev) ...@@ -1285,8 +1267,11 @@ static int vxlan_stop(struct net_device *dev)
{ {
struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_dev *vxlan = netdev_priv(dev);
if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
vxlan_leave_group(dev); vxlan_sock_hold(vxlan->vn_sock);
dev_hold(dev);
queue_work(vxlan_wq, &vxlan->igmp_work);
}
del_timer_sync(&vxlan->age_timer); del_timer_sync(&vxlan->age_timer);
...@@ -1355,6 +1340,7 @@ static void vxlan_setup(struct net_device *dev) ...@@ -1355,6 +1340,7 @@ static void vxlan_setup(struct net_device *dev)
INIT_LIST_HEAD(&vxlan->next); INIT_LIST_HEAD(&vxlan->next);
spin_lock_init(&vxlan->hash_lock); spin_lock_init(&vxlan->hash_lock);
INIT_WORK(&vxlan->igmp_work, vxlan_igmp_work);
init_timer_deferrable(&vxlan->age_timer); init_timer_deferrable(&vxlan->age_timer);
vxlan->age_timer.function = vxlan_cleanup; vxlan->age_timer.function = vxlan_cleanup;
...@@ -1498,8 +1484,8 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port) ...@@ -1498,8 +1484,8 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port)
udp_sk(sk)->encap_type = 1; udp_sk(sk)->encap_type = 1;
udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv; udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
udp_encap_enable(); udp_encap_enable();
atomic_set(&vs->refcnt, 1);
vs->refcnt = 1;
return vs; return vs;
} }
...@@ -1589,7 +1575,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, ...@@ -1589,7 +1575,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
vs = vxlan_find_port(net, vxlan->dst_port); vs = vxlan_find_port(net, vxlan->dst_port);
if (vs) if (vs)
++vs->refcnt; atomic_inc(&vs->refcnt);
else { else {
/* Drop lock because socket create acquires RTNL lock */ /* Drop lock because socket create acquires RTNL lock */
rtnl_unlock(); rtnl_unlock();
...@@ -1606,12 +1592,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, ...@@ -1606,12 +1592,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
err = register_netdevice(dev); err = register_netdevice(dev);
if (err) { if (err) {
if (--vs->refcnt == 0) { vxlan_sock_release(vs);
rtnl_unlock();
sk_release_kernel(vs->sock->sk);
kfree(vs);
rtnl_lock();
}
return err; return err;
} }
...@@ -1629,11 +1610,7 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) ...@@ -1629,11 +1610,7 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
hlist_del_rcu(&vxlan->hlist); hlist_del_rcu(&vxlan->hlist);
list_del(&vxlan->next); list_del(&vxlan->next);
unregister_netdevice_queue(dev, head); unregister_netdevice_queue(dev, head);
vxlan_sock_release(vs);
if (--vs->refcnt == 0) {
hlist_del_rcu(&vs->hlist);
queue_work(vxlan_wq, &vs->del_work);
}
} }
static size_t vxlan_get_size(const struct net_device *dev) static size_t vxlan_get_size(const struct net_device *dev)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment