Commit ccffa545 authored by Leon Romanovsky, committed by Jason Gunthorpe

Revert "IB/mlx5: Fix long EEH recover time with NVMe offloads"

Longer-term testing showed that this patch didn't play well with the
MR cache and caused call traces during remove_mkeys().

This reverts commit bb7e22a8.
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 7422edce
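
For orientation, the first hunk below restores an unconditional synchronize_srcu() in destroy_mkey(). A minimal sketch of the post-revert function follows, assuming a body built around mlx5_core_destroy_mkey(), since only the #ifdef region and the return appear in the diff context:

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	/* Assumed: destroy the hardware mkey; this line sits outside the diff. */
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

The revert drops the mr->umem && mr->umem->is_odp test, so every mkey destruction again waits out a full SRCU grace period, whether or not the MR is ODP-backed.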
@@ -73,8 +73,7 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	/* Wait until all page fault handlers using the mr complete. */
-	if (mr->umem && mr->umem->is_odp)
-		synchronize_srcu(&dev->mr_srcu);
+	synchronize_srcu(&dev->mr_srcu);
 #endif
 
 	return err;
@@ -238,9 +237,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	bool odp_mkey_exist = false;
-#endif
 	struct mlx5_ib_mr *tmp_mr;
 	struct mlx5_ib_mr *mr;
 	LIST_HEAD(del_list);
@@ -253,10 +249,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 			break;
 		}
 		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-		if (mr->umem && mr->umem->is_odp)
-			odp_mkey_exist = true;
-#endif
 		list_move(&mr->list, &del_list);
 		ent->cur--;
 		ent->size--;
@@ -265,8 +257,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 	}
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	if (odp_mkey_exist)
-		synchronize_srcu(&dev->mr_srcu);
+	synchronize_srcu(&dev->mr_srcu);
 #endif
 
 	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
@@ -581,7 +572,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
-	bool odp_mkey_exist = false;
 	struct mlx5_ib_mr *tmp_mr;
 	struct mlx5_ib_mr *mr;
 	LIST_HEAD(del_list);
@@ -594,8 +584,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 			break;
 		}
 		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
-		if (mr->umem && mr->umem->is_odp)
-			odp_mkey_exist = true;
 		list_move(&mr->list, &del_list);
 		ent->cur--;
 		ent->size--;
@@ -604,8 +592,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 	}
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	if (odp_mkey_exist)
-		synchronize_srcu(&dev->mr_srcu);
+	synchronize_srcu(&dev->mr_srcu);
 #endif
 
 	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
...