Commit e5366d30 authored by Guy Levi, committed by Doug Ledford

IB/mlx5: Fix MR registration flow to use UMR properly

The driver shouldn't allow UMR to be used for MR registration when
umr_modify_atomic_disabled is set. Otherwise, registration always ends up
failing in the post-send flow, which sets the UMR WQE to modify the MR's
atomic access rights.
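
In other words, UMR-based registration is only eligible when the device
capabilities allow every modification the UMR WQE will request. A minimal
sketch of the predicate this fix inlines in the diff below, stated here as a
hypothetical helper (can_use_umr is not a function the patch adds; the patch
open-codes the check in mlx5_ib_reg_user_mr()):

/* Hypothetical helper restating the fix's inline check: UMR may be
 * used only if the device lets UMR modify the entity size, and the
 * atomic access rights are either modifiable via UMR or absent on
 * the device altogether.
 */
static bool can_use_umr(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
	       (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
		!MLX5_CAP_GEN(dev->mdev, atomic));
}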

Fixes: c8d75a98 ("IB/mlx5: Respect new UMR capabilities")
Signed-off-by: Guy Levi <guyle@mellanox.com>
Reviewed-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Link: https://lore.kernel.org/r/20190731081929.32559-1-leon@kernel.org
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 9cd58817
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
-	return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
-
 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
 {
 	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
 }
 
-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
-	return order <= mr_cache_max_order(dev) &&
-		umr_can_modify_entity_size(dev);
-}
-
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -1271,7 +1261,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_mr *mr = NULL;
-	bool populate_mtts = false;
+	bool use_umr;
 	struct ib_umem *umem;
 	int page_shift;
 	int npages;
@@ -1303,29 +1293,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (err < 0)
 		return ERR_PTR(err);
 
-	if (use_umr(dev, order)) {
+	use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
+		  (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
+		   !MLX5_CAP_GEN(dev->mdev, atomic));
+
+	if (order <= mr_cache_max_order(dev) && use_umr) {
 		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
 					 page_shift, order, access_flags);
 		if (PTR_ERR(mr) == -EAGAIN) {
 			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
 			mr = NULL;
 		}
-		populate_mtts = false;
 	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
 		if (access_flags & IB_ACCESS_ON_DEMAND) {
 			err = -EINVAL;
 			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
 			goto error;
 		}
-		populate_mtts = true;
+		use_umr = false;
 	}
 
 	if (!mr) {
-		if (!umr_can_modify_entity_size(dev))
-			populate_mtts = true;
 		mutex_lock(&dev->slow_path_mutex);
 		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
-				page_shift, access_flags, populate_mtts);
+				page_shift, access_flags, !use_umr);
 		mutex_unlock(&dev->slow_path_mutex);
 	}
@@ -1341,7 +1332,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	update_odp_mr(mr);
 
-	if (!populate_mtts) {
+	if (use_umr) {
 		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
 		if (access_flags & IB_ACCESS_ON_DEMAND)
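
For context on the failure mode the commit message describes: since
c8d75a98, the UMR post-send path validates the WQE's requested mkey mask
against the device capabilities and rejects a request to modify atomic
access rights when umr_modify_atomic_disabled is set. A simplified sketch of
that validation, per my reading of the Fixes commit; treat the helper name
and exact structure as illustrative, not verbatim:

/* Illustrative sketch of the post-send mkey-mask validation added by
 * commit c8d75a98 ("IB/mlx5: Respect new UMR capabilities"); exact
 * names and structure in the driver may differ.
 */
static bool umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
{
	if (mask & MLX5_MKEY_MASK_PAGE_SIZE &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	if (mask & MLX5_MKEY_MASK_A &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	return true;
}

A UMR registration on such a device would always fail this check, which is
why the fix filters those devices out before choosing the UMR path.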