Commit c85023e1 authored by Huy Nguyen, committed by Doug Ledford

IB/mlx5: Add raw ethernet local loopback support

Currently, unicast/multicast loopback raw ethernet
(non-RDMA) packets are sent back to the vport.
A unicast loopback packet is a packet whose destination
MAC address is the same as its source MAC address.
A multicast loopback packet is one whose destination
MAC address is in the vport's multicast filter list.

Moreover, local loopback is not needed when there is
only one user space context, or none at all.

After this patch, raw ethernet unicast and multicast
local loopback are disabled by default. When there is more
than one user space context, local loopback is enabled.

Note that when local loopback is disabled, raw ethernet
packets are not looped back to the vport and are forwarded
to the next routing level (eswitch, or multihost switch,
or out to the wire depending on the configuration).
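
For illustration, a minimal user space sketch (using the standard
libibverbs API; this snippet is not part of the patch, and device
selection is simplified) of how more than one user space context
comes about. Each ibv_open_device() call creates a separate
ucontext in mlx5_ib, so the second open is what turns local
loopback on:

#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
        int num;
        struct ibv_device **list = ibv_get_device_list(&num);

        if (!list || num == 0) {
                fprintf(stderr, "no RDMA devices found\n");
                return 1;
        }

        /* First context: local loopback stays disabled. */
        struct ibv_context *ctx1 = ibv_open_device(list[0]);

        /* Second context: the driver's user_td count reaches 2 and,
         * on an ethernet port that reports the disable_local_lb
         * capability, unicast/multicast local loopback is enabled. */
        struct ibv_context *ctx2 = ibv_open_device(list[0]);

        /* Dropping back to one (or zero) contexts disables it again. */
        if (ctx2)
                ibv_close_device(ctx2);
        if (ctx1)
                ibv_close_device(ctx1);
        ibv_free_device_list(list);
        return 0;
}
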
Signed-off-by: Huy Nguyen <huyn@mellanox.com>
Reviewed-by: Daniel Jurgens <danielj@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent bded747b
@@ -58,6 +58,7 @@
#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
#include "cmd.h"
+#include <linux/mlx5/vport.h>

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"
@@ -1187,6 +1188,45 @@ static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *con
        return 0;
}

+static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
+{
+        int err;
+
+        err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
+        if (err)
+                return err;
+
+        if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
+            !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+                return err;
+
+        mutex_lock(&dev->lb_mutex);
+        dev->user_td++;
+
+        if (dev->user_td == 2)
+                err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
+
+        mutex_unlock(&dev->lb_mutex);
+        return err;
+}
+
+static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
+{
+        mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
+
+        if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
+            !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+                return;
+
+        mutex_lock(&dev->lb_mutex);
+        dev->user_td--;
+
+        if (dev->user_td < 2)
+                mlx5_nic_vport_update_local_lb(dev->mdev, false);
+
+        mutex_unlock(&dev->lb_mutex);
+}
+
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
                                                  struct ib_udata *udata)
{
@@ -1295,8 +1335,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        mutex_init(&context->upd_xlt_page_mutex);

        if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
-                err = mlx5_core_alloc_transport_domain(dev->mdev,
-                                                       &context->tdn);
+                err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
                if (err)
                        goto out_page;
        }
@@ -1362,7 +1401,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
out_td:
        if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
-                mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
+                mlx5_ib_dealloc_transport_domain(dev, context->tdn);

out_page:
        free_page(context->upd_xlt_page);
@@ -1390,7 +1429,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
        bfregi = &context->bfregi;
        if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
-                mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
+                mlx5_ib_dealloc_transport_domain(dev, context->tdn);

        free_page(context->upd_xlt_page);
        deallocate_uars(dev, context);
@@ -3797,6 +3836,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
                goto err_umrc;
        }

+        if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+            MLX5_CAP_GEN(mdev, disable_local_lb))
+                mutex_init(&dev->lb_mutex);
+
        dev->ib_active = true;

        return dev;
@@ -652,9 +652,13 @@ struct mlx5_ib_dev {
        struct list_head        qp_list;
        /* Array with num_ports elements */
        struct mlx5_ib_port     *port;
-        struct mlx5_sq_bfreg    bfreg;
-        struct mlx5_sq_bfreg    fp_bfreg;
-        u8                      umr_fence;
+        struct mlx5_sq_bfreg    bfreg;
+        struct mlx5_sq_bfreg    fp_bfreg;
+        /* protect the user_td */
+        struct mutex            lb_mutex;
+        u32                     user_td;
+        u8                      umr_fence;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -47,6 +47,7 @@
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/vport.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
@@ -579,6 +580,18 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
        return err;
}

+static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
+{
+        int ret = 0;
+
+        /* Disable local_lb by default */
+        if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+            MLX5_CAP_GEN(dev, disable_local_lb))
+                ret = mlx5_nic_vport_update_local_lb(dev, false);
+
+        return ret;
+}
+
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
        u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
@@ -1155,6 +1168,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
                goto err_fs;
        }

+        err = mlx5_core_set_hca_defaults(dev);
+        if (err) {
+                dev_err(&pdev->dev, "Failed to set hca defaults\n");
+                goto err_fs;
+        }
+
#ifdef CONFIG_MLX5_CORE_EN
        mlx5_eswitch_attach(dev->priv.eswitch);
#endif
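
The helper used throughout the patch, mlx5_nic_vport_update_local_lb(),
is added in the mlx5 core vport code and is not shown in this excerpt.
A rough sketch of the pattern it follows is below; the exact mlx5_ifc
field names and the internal mlx5_modify_nic_vport_context() call are
assumptions, not a verbatim copy of the patch:

/* Sketch only: toggle the unicast and multicast "disable local
 * loopback" bits of the NIC vport context. */
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
        void *in;
        int err;

        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        /* The hardware fields are "disable" bits, so they are written
         * as the inverse of @enable; the field_select bits tell the
         * firmware which fields of the context to update. */
        MLX5_SET(modify_nic_vport_context_in, in,
                 field_select.disable_uc_local_lb, 1);
        MLX5_SET(modify_nic_vport_context_in, in,
                 nic_vport_context.disable_uc_local_lb, !enable);
        MLX5_SET(modify_nic_vport_context_in, in,
                 field_select.disable_mc_local_lb, 1);
        MLX5_SET(modify_nic_vport_context_in, in,
                 nic_vport_context.disable_mc_local_lb, !enable);

        err = mlx5_modify_nic_vport_context(mdev, in, inlen);

        kvfree(in);
        return err;
}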