Commit 7a64ca86 authored by Saeed Mahameed

net/mlx5e: vxlan: Use RCU for vxlan table lookup

Remove the spinlock protecting the vxlan table and use RCU instead.
This will improve performance as it will eliminate contention on data
path cores.

Fixes: b3f63c3d ("net/mlx5e: Add netdev support for VXLAN tunneling")
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Reviewed-by: Maxim Mikityanskiy <maximmi@mellanox.com>
parent 185901ce
...@@ -40,7 +40,6 @@ ...@@ -40,7 +40,6 @@
struct mlx5_vxlan { struct mlx5_vxlan {
struct mlx5_core_dev *mdev; struct mlx5_core_dev *mdev;
spinlock_t lock; /* protect vxlan table */
 /* max_num_ports is usually 4, 16 buckets is more than enough */ /* max_num_ports is usually 4, 16 buckets is more than enough */
DECLARE_HASHTABLE(htable, 4); DECLARE_HASHTABLE(htable, 4);
int num_ports; int num_ports;
...@@ -78,45 +77,46 @@ static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) ...@@ -78,45 +77,46 @@ static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
return mlx5_cmd_exec_in(mdev, delete_vxlan_udp_dport, in); return mlx5_cmd_exec_in(mdev, delete_vxlan_udp_dport, in);
} }
static struct mlx5_vxlan_port* struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
mlx5_vxlan_lookup_port_locked(struct mlx5_vxlan *vxlan, u16 port)
{ {
struct mlx5_vxlan_port *vxlanp; struct mlx5_vxlan_port *retptr = NULL, *vxlanp;
hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) { if (!mlx5_vxlan_allowed(vxlan))
if (vxlanp->udp_port == port) return NULL;
return vxlanp;
}
return NULL; rcu_read_lock();
hash_for_each_possible_rcu(vxlan->htable, vxlanp, hlist, port)
if (vxlanp->udp_port == port) {
retptr = vxlanp;
break;
}
rcu_read_unlock();
return retptr;
} }
/* Control-path lookup helper.
 *
 * Walks the hash bucket without any RCU protection; the caller must
 * hold vxlan->sync_lock so the table cannot change underneath us.
 *
 * Returns the entry for @port, or NULL if it is not in the table.
 */
static struct mlx5_vxlan_port *vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
{
	struct mlx5_vxlan_port *cur;

	hash_for_each_possible(vxlan->htable, cur, hlist, port) {
		if (cur->udp_port == port)
			return cur;
	}

	return NULL;
}
int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
{ {
struct mlx5_vxlan_port *vxlanp; struct mlx5_vxlan_port *vxlanp;
int ret = -ENOSPC; int ret = 0;
vxlanp = mlx5_vxlan_lookup_port(vxlan, port); mutex_lock(&vxlan->sync_lock);
vxlanp = vxlan_lookup_port(vxlan, port);
if (vxlanp) { if (vxlanp) {
refcount_inc(&vxlanp->refcount); refcount_inc(&vxlanp->refcount);
return 0; goto unlock;
} }
mutex_lock(&vxlan->sync_lock);
if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) { if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
mlx5_core_info(vxlan->mdev, mlx5_core_info(vxlan->mdev,
"UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n", "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
...@@ -138,9 +138,7 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) ...@@ -138,9 +138,7 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
vxlanp->udp_port = port; vxlanp->udp_port = port;
refcount_set(&vxlanp->refcount, 1); refcount_set(&vxlanp->refcount, 1);
spin_lock_bh(&vxlan->lock); hash_add_rcu(vxlan->htable, &vxlanp->hlist, port);
hash_add(vxlan->htable, &vxlanp->hlist, port);
spin_unlock_bh(&vxlan->lock);
vxlan->num_ports++; vxlan->num_ports++;
mutex_unlock(&vxlan->sync_lock); mutex_unlock(&vxlan->sync_lock);
...@@ -157,34 +155,26 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) ...@@ -157,34 +155,26 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
/* Drop one reference on an offloaded VXLAN UDP port.
 *
 * When the last reference goes away the entry is unpublished from the
 * RCU hash table, synchronize_rcu() waits for in-flight lockless
 * readers (mlx5_vxlan_lookup_port) to finish, and only then is the
 * firmware port deleted and the entry freed.
 *
 * Serialized against add/del by vxlan->sync_lock.
 *
 * Returns 0 on success, -ENOENT if @port was never offloaded.
 */
int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
{
	struct mlx5_vxlan_port *entry;
	int err = 0;

	mutex_lock(&vxlan->sync_lock);

	entry = vxlan_lookup_port(vxlan, port);
	if (!entry) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (refcount_dec_and_test(&entry->refcount)) {
		/* Unpublish first, then wait out concurrent RCU readers
		 * before tearing down HW state and freeing the memory.
		 */
		hash_del_rcu(&entry->hlist);
		synchronize_rcu();
		mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
		kfree(entry);
		vxlan->num_ports--;
	}

out_unlock:
	mutex_unlock(&vxlan->sync_lock);
	return err;
}
...@@ -201,7 +191,6 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev) ...@@ -201,7 +191,6 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
vxlan->mdev = mdev; vxlan->mdev = mdev;
mutex_init(&vxlan->sync_lock); mutex_init(&vxlan->sync_lock);
spin_lock_init(&vxlan->lock);
hash_init(vxlan->htable); hash_init(vxlan->htable);
/* Hardware adds 4789 (IANA_VXLAN_UDP_PORT) by default */ /* Hardware adds 4789 (IANA_VXLAN_UDP_PORT) by default */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment