Commit 3954e1d0 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Over the break a few defects were found, so this is a -rc style pull
  request of various small things that have been posted.

   - An attempt to shorten RCU grace period driven delays showed crashes
     during heavier testing, and has been entirely reverted

   - A missed merge/rebase error between the advise_mr and ib_device_ops
     series

   - Some small static analysis driven fixes from Julia and Aditya

   - Missed ability to create an XRC_INI in the devx verbs interop
     series"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  infiniband/qedr: Potential null ptr dereference of qp
  infiniband: bnxt_re: qplib: Check the return value of send_message
  IB/ipoib: drop useless LIST_HEAD
  IB/core: Add advise_mr to the list of known ops
  Revert "IB/mlx5: Fix long EEH recover time with NVMe offloads"
  IB/mlx5: Allow XRC INI usage via verbs in DEVX context
parents a8a6b118 9c6260de
@@ -1232,6 +1232,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
         } while (0)
 
         SET_DEVICE_OP(dev_ops, add_gid);
+        SET_DEVICE_OP(dev_ops, advise_mr);
         SET_DEVICE_OP(dev_ops, alloc_dm);
         SET_DEVICE_OP(dev_ops, alloc_fmr);
         SET_DEVICE_OP(dev_ops, alloc_hw_stats);
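The advise_mr hunk above restores a line lost in the merge/rebase noted in the pull message: ib_set_device_ops() only copies an op from the driver's ops table into the device when there is an explicit SET_DEVICE_OP() line for it, so a missing entry silently leaves that verb unwired. A minimal user-space sketch of that copy-if-unset pattern (hypothetical names, not the kernel source):

#include <stdio.h>

/* Hypothetical stand-in for struct ib_device_ops: a table of callbacks. */
struct demo_ops {
        int (*add_gid)(void);
        int (*advise_mr)(void);
};

/* Copy op 'name' from src to dst only if src provides it and dst lacks it,
 * mirroring the SET_DEVICE_OP() do/while macro pattern. */
#define DEMO_SET_OP(dst, src, name)                        \
        do {                                               \
                if ((src)->name && !(dst)->name)           \
                        (dst)->name = (src)->name;         \
        } while (0)

static int demo_advise_mr(void) { return 0; }

int main(void)
{
        struct demo_ops driver_ops = { .advise_mr = demo_advise_mr };
        struct demo_ops dev_ops = { 0 };

        DEMO_SET_OP(&dev_ops, &driver_ops, add_gid);
        /* Without the next line the driver's advise_mr handler is never
         * wired up, which is the kind of omission the hunk above fixes. */
        DEMO_SET_OP(&dev_ops, &driver_ops, advise_mr);

        printf("advise_mr wired: %s\n", dev_ops.advise_mr ? "yes" : "no");
        return 0;
}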
@@ -780,9 +780,8 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
         req.cos0 = cpu_to_le16(cids[0]);
         req.cos1 = cpu_to_le16(cids[1]);
 
-        bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL,
-                                     0);
-        return 0;
+        return bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+                                            NULL, 0);
 }
 
 int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
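The bnxt_re hunk is one of the static-analysis fixes mentioned in the pull message: the firmware command's status was computed and then discarded, with the function unconditionally returning 0. Propagating the callee's return code is the usual remedy; a small sketch of the before/after shape (send_command() is a hypothetical stand-in, not the driver API):

#include <stdio.h>

/* Hypothetical command-send helper standing in for
 * bnxt_qplib_rcfw_send_message(); returns 0 on success, negative on error. */
static int send_command(int cmd)
{
        return (cmd == 42) ? 0 : -1;
}

/* Before: the status is dropped and callers always see success. */
static int map_tc2cos_buggy(int cmd)
{
        send_command(cmd);
        return 0;
}

/* After: the callee's result is propagated, as in the hunk above. */
static int map_tc2cos_fixed(int cmd)
{
        return send_command(cmd);
}

int main(void)
{
        printf("buggy: %d, fixed: %d\n",
               map_tc2cos_buggy(7), map_tc2cos_fixed(7));
        return 0;
}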
@@ -73,8 +73,7 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
         /* Wait until all page fault handlers using the mr complete. */
-        if (mr->umem && mr->umem->is_odp)
-                synchronize_srcu(&dev->mr_srcu);
+        synchronize_srcu(&dev->mr_srcu);
 #endif
 
         return err;
@@ -238,9 +237,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
         struct mlx5_mr_cache *cache = &dev->cache;
         struct mlx5_cache_ent *ent = &cache->ent[c];
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-        bool odp_mkey_exist = false;
-#endif
         struct mlx5_ib_mr *tmp_mr;
         struct mlx5_ib_mr *mr;
         LIST_HEAD(del_list);
@@ -253,10 +249,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
                         break;
                 }
                 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-                if (mr->umem && mr->umem->is_odp)
-                        odp_mkey_exist = true;
-#endif
                 list_move(&mr->list, &del_list);
                 ent->cur--;
                 ent->size--;
@@ -265,8 +257,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
         }
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-        if (odp_mkey_exist)
-                synchronize_srcu(&dev->mr_srcu);
+        synchronize_srcu(&dev->mr_srcu);
 #endif
 
         list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
@@ -581,7 +572,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
         struct mlx5_mr_cache *cache = &dev->cache;
         struct mlx5_cache_ent *ent = &cache->ent[c];
-        bool odp_mkey_exist = false;
         struct mlx5_ib_mr *tmp_mr;
         struct mlx5_ib_mr *mr;
         LIST_HEAD(del_list);
@@ -594,8 +584,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
                         break;
                 }
                 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
-                if (mr->umem && mr->umem->is_odp)
-                        odp_mkey_exist = true;
                 list_move(&mr->list, &del_list);
                 ent->cur--;
                 ent->size--;
@@ -604,8 +592,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
         }
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-        if (odp_mkey_exist)
-                synchronize_srcu(&dev->mr_srcu);
+        synchronize_srcu(&dev->mr_srcu);
 #endif
 
         list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
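The mr.c hunks above are the revert described in the pull message: the reverted patch skipped the SRCU grace-period wait unless an ODP MR had been spotted in the list, to shorten teardown delays, but the change showed crashes under heavier testing, presumably because MKeys could still be in use by readers the check did not account for. The revert makes synchronize_srcu() unconditional again. A hedged sketch of the control-flow difference, with a stubbed wait_for_grace_period() standing in for synchronize_srcu():

#include <stdbool.h>
#include <stdio.h>

/* Stub standing in for synchronize_srcu(): in the kernel this blocks until
 * every reader that entered its SRCU read-side section before the call has
 * left it. */
static void wait_for_grace_period(void)
{
        puts("waiting for all in-flight readers to finish");
}

/* Reverted scheme: only wait if an ODP MR was noticed.  If a reader can
 * still reach an MKey through a path this check does not see, the key is
 * freed while potentially in use. */
static void destroy_keys_conditional(bool saw_odp_mr)
{
        if (saw_odp_mr)
                wait_for_grace_period();
        puts("freeing MKeys");
}

/* Restored scheme (the revert): always wait before freeing. */
static void destroy_keys_unconditional(void)
{
        wait_for_grace_period();
        puts("freeing MKeys");
}

int main(void)
{
        destroy_keys_conditional(false);   /* may free while readers run */
        destroy_keys_unconditional();      /* safe ordering */
        return 0;
}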
@@ -837,7 +837,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                 goto err_umem;
         }
 
-        uid = (attr->qp_type != IB_QPT_XRC_TGT) ? to_mpd(pd)->uid : 0;
+        uid = (attr->qp_type != IB_QPT_XRC_TGT &&
+               attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
         MLX5_SET(create_qp_in, *in, uid, uid);
         pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
         if (ubuffer->umem)
@@ -492,6 +492,8 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
         int i;
 
         qp = idr_find(&dev->qpidr.idr, conn_param->qpn);
+        if (unlikely(!qp))
+                return -EINVAL;
 
         laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
         raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
@@ -669,7 +669,6 @@ static void __ipoib_reap_ah(struct net_device *dev)
 {
         struct ipoib_dev_priv *priv = ipoib_priv(dev);
         struct ipoib_ah *ah, *tah;
-        LIST_HEAD(remove_list);
         unsigned long flags;
 
         netif_tx_lock_bh(dev);