Commit 410694e2 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull infiniband fixes from Doug Ledford:
 "It's late in the game, I know, but these fixes seemed important enough
  to warrant a late pull request.  They all involve oopses or use after
  frees or corruptions.

  Six serious fixes:

   - Hold the mutex around the find and corresponding update of our gid

   - The ifa list is RCU-protected; copy its contents under RCU to avoid
     using a freed structure

   - On error, netdev might be null, so check it before trying to
     release it

   - On init, if workqueue alloc fails, fail init

   - The new demux patches exposed a bug in the mlx5 and ipath drivers:
     we need to use the payload P_Key to determine which P_Key the packet
     arrived on, because the hardware doesn't tell us the truth

   - Due to a couple of convoluted error flows, it is possible for the CM
     to trigger a use-after-free and a double free of rb-tree nodes.  Add
     two checks to prevent that.  This code has worked for 10+ years; it
     is likely that some of the recent changes have caused this issue to
     surface.  The current patch will protect us from nasty events for
     now while we track down why this is just now showing up"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  IB/cm: Fix rb-tree duplicate free and use-after-free
  IB/cma: Use inner P_Key to determine netdev
  IB/ucma: check workqueue allocation before usage
  IB/cma: Potential NULL dereference in cma_id_from_event
  IB/core: Fix use after free of ifa
  IB/core: Fix memory corruption in ib_cache_gid_set_default_gid
parents 35df017c 0ca81a28
@@ -508,12 +508,12 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
 	memset(&gid_attr, 0, sizeof(gid_attr));
 	gid_attr.ndev = ndev;
 
+	mutex_lock(&table->lock);
 	ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT);
 
 	/* Couldn't find default GID location */
 	WARN_ON(ix < 0);
 
-	mutex_lock(&table->lock);
 	if (!__ib_cache_gid_get(ib_dev, port, ix,
 				&current_gid, &current_gid_attr) &&
 	    mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
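The fix here is purely one of lock placement: find_gid() computes an index into the GID table, and the subsequent update writes through that index, so both must happen under the same critical section or a concurrent writer can invalidate the index in between. A minimal kernel-style sketch of the rule; slot_table, find_slot(), and write_slot() are hypothetical stand-ins for the GID cache internals:

```c
#include <linux/mutex.h>

struct slot_table {
	struct mutex lock;
	/* ... slot storage ... */
};

static int find_slot(struct slot_table *t);		/* hypothetical */
static void write_slot(struct slot_table *t, int ix);	/* hypothetical */

static void update_default_slot(struct slot_table *t)
{
	int ix;

	mutex_lock(&t->lock);		/* take the lock BEFORE the lookup ... */
	ix = find_slot(t);		/* ... so the index is computed under it */
	if (!WARN_ON(ix < 0))
		write_slot(t, ix);	/* index still valid: nobody could have
					 * reshuffled the table since find_slot() */
	mutex_unlock(&t->lock);
}
```

Locking only after find_slot(), as the old code did, lets a concurrent writer move entries first, and write_slot() then scribbles on whatever now lives at the stale index — the memory corruption the pull message describes.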
@@ -835,6 +835,11 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
 	case IB_CM_SIDR_REQ_RCVD:
 		spin_unlock_irq(&cm_id_priv->lock);
 		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
+		spin_lock_irq(&cm.lock);
+		if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
+			rb_erase(&cm_id_priv->sidr_id_node,
+				 &cm.remote_sidr_table);
+		spin_unlock_irq(&cm.lock);
 		break;
 	case IB_CM_REQ_SENT:
 	case IB_CM_MRA_REQ_RCVD:
@@ -3172,7 +3177,10 @@ int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
 	spin_lock_irqsave(&cm.lock, flags);
-	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
+	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
+		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
+		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
+	}
 	spin_unlock_irqrestore(&cm.lock, flags);
 	return 0;
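Both hunks above guard against erasing cm_id_priv->sidr_id_node from cm.remote_sidr_table twice: ib_send_cm_sidr_rep() now clears the node after erasing it, and both paths test RB_EMPTY_NODE() before touching the tree. A minimal sketch of the idiom (not the CM code itself); it assumes the node was initialized with RB_CLEAR_NODE() and that the caller holds the lock serializing the tree (cm.lock here):

```c
#include <linux/rbtree.h>

static void erase_once(struct rb_node *node, struct rb_root *tree)
{
	if (!RB_EMPTY_NODE(node)) {	/* still linked into the tree? */
		rb_erase(node, tree);	/* unlink it ... */
		RB_CLEAR_NODE(node);	/* ... and mark it unlinked, so a
					 * second caller sees RB_EMPTY_NODE()
					 * and backs off instead of erasing
					 * or freeing twice */
	}
}
```

Without RB_CLEAR_NODE(), an erased node still carries stale parent/color bits, so a second rb_erase() would happily rebalance the tree around freed or reused memory.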
@@ -1067,14 +1067,14 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
 		       sizeof(req->local_gid));
 		req->has_gid	= true;
 		req->service_id	= req_param->primary_path->service_id;
-		req->pkey	= req_param->bth_pkey;
+		req->pkey	= be16_to_cpu(req_param->primary_path->pkey);
 		break;
 	case IB_CM_SIDR_REQ_RECEIVED:
 		req->device	= sidr_param->listen_id->device;
 		req->port	= sidr_param->port;
 		req->has_gid	= false;
 		req->service_id	= sidr_param->service_id;
-		req->pkey	= sidr_param->bth_pkey;
+		req->pkey	= sidr_param->pkey;
 		break;
 	default:
 		return -EINVAL;
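This hunk drops bth_pkey, the P_Key the hardware reports from the packet's BTH header, in favor of the P_Key carried in the CM payload, which mlx5 and ipath deliver reliably. One detail worth calling out is endianness: the path-record field travels in wire (big-endian) order, while the SIDR parameter is already host order. A hypothetical pair of helpers, assuming the struct layouts of this kernel generation:

```c
#include <rdma/ib_cm.h>

/* Payload P_Key for a connected REQ: stored big-endian in the path record. */
static u16 req_payload_pkey(const struct ib_cm_req_event_param *req_param)
{
	return be16_to_cpu(req_param->primary_path->pkey);
}

/* Payload P_Key for a SIDR REQ: already host order, no conversion needed. */
static u16 sidr_payload_pkey(const struct ib_cm_sidr_req_event_param *sidr_param)
{
	return sidr_param->pkey;
}
```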
@@ -1324,7 +1324,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
 	bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id),
 				cma_port_from_service_id(req.service_id));
 	id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
-	if (IS_ERR(id_priv)) {
+	if (IS_ERR(id_priv) && *net_dev) {
 		dev_put(*net_dev);
 		*net_dev = NULL;
 	}
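The added condition exists because *net_dev may legitimately be NULL on this path — the earlier netdev lookup can fail without being fatal — and dev_put() in kernels of this era dereferences its argument unconditionally. The general pattern, as a sketch rather than the kernel's code:

```c
#include <linux/netdevice.h>

/* Release an optional netdev reference: test before dropping it. */
static void put_optional_netdev(struct net_device **dev)
{
	if (*dev) {			/* lookup may have found nothing */
		dev_put(*dev);		/* dev_put(NULL) would oops here */
		*dev = NULL;		/* don't leave a dangling pointer */
	}
}
```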
@@ -250,25 +250,44 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
 				 u8 port, struct net_device *ndev)
 {
 	struct in_device *in_dev;
+	struct sin_list {
+		struct list_head	list;
+		struct sockaddr_in	ip;
+	};
+	struct sin_list *sin_iter;
+	struct sin_list *sin_temp;
 
+	LIST_HEAD(sin_list);
 	if (ndev->reg_state >= NETREG_UNREGISTERING)
 		return;
 
-	in_dev = in_dev_get(ndev);
-	if (!in_dev)
+	rcu_read_lock();
+	in_dev = __in_dev_get_rcu(ndev);
+	if (!in_dev) {
+		rcu_read_unlock();
 		return;
+	}
 
 	for_ifa(in_dev) {
-		struct sockaddr_in ip;
+		struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 
-		ip.sin_family = AF_INET;
-		ip.sin_addr.s_addr = ifa->ifa_address;
-		update_gid_ip(GID_ADD, ib_dev, port, ndev,
-			      (struct sockaddr *)&ip);
+		if (!entry) {
+			pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n");
+			continue;
+		}
+		entry->ip.sin_family = AF_INET;
+		entry->ip.sin_addr.s_addr = ifa->ifa_address;
+		list_add_tail(&entry->list, &sin_list);
 	}
 	endfor_ifa(in_dev);
+	rcu_read_unlock();
 
-	in_dev_put(in_dev);
+	list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
+		update_gid_ip(GID_ADD, ib_dev, port, ndev,
+			      (struct sockaddr *)&sin_iter->ip);
+		list_del(&sin_iter->list);
+		kfree(sin_iter);
+	}
 }
 
 static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
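This rewrite satisfies two constraints at once: the ifa list walked by for_ifa() is RCU-protected, so entries may be freed the moment the read-side critical section ends, and update_gid_ip() may sleep, so it cannot run inside one. Hence the shape: snapshot under rcu_read_lock() with atomic allocations, then do the sleepy work on the private copy. A sketch of that shape with hypothetical names (item, snap, do_sleepy_update):

```c
#include <linux/types.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct item { struct list_head node; u32 val; };	/* RCU-managed source */
struct snap { struct list_head node; u32 val; };	/* private copy */

static void do_sleepy_update(u32 val);			/* hypothetical; may block */

static void process_items(struct list_head *rcu_list)
{
	struct item *it;
	struct snap *s, *tmp;
	LIST_HEAD(snapshot);

	rcu_read_lock();				/* pins the readers' view ... */
	list_for_each_entry_rcu(it, rcu_list, node) {
		s = kzalloc(sizeof(*s), GFP_ATOMIC);	/* ... so no sleeping allocations */
		if (!s)
			continue;			/* degrade gracefully, as the fix does */
		s->val = it->val;
		list_add_tail(&s->node, &snapshot);
	}
	rcu_read_unlock();				/* originals may be freed from here on */

	list_for_each_entry_safe(s, tmp, &snapshot, node) {
		do_sleepy_update(s->val);		/* safe: we own this private copy */
		list_del(&s->node);
		kfree(s);
	}
}
```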
@@ -1624,11 +1624,16 @@ static int ucma_open(struct inode *inode, struct file *filp)
 	if (!file)
 		return -ENOMEM;
 
+	file->close_wq = create_singlethread_workqueue("ucma_close_id");
+	if (!file->close_wq) {
+		kfree(file);
+		return -ENOMEM;
+	}
+
 	INIT_LIST_HEAD(&file->event_list);
 	INIT_LIST_HEAD(&file->ctx_list);
 	init_waitqueue_head(&file->poll_wait);
 	mutex_init(&file->mut);
-	file->close_wq = create_singlethread_workqueue("ucma_close_id");
 
 	filp->private_data = file;
 	file->filp = filp;
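Beyond adding the missing NULL check, moving the allocation to the top of ucma_open() means the only fallible step is handled before any other state exists, so the unwind is a single kfree() and it happens before the file is published via filp->private_data. A sketch of that fail-fast open pattern with hypothetical names:

```c
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj {
	struct workqueue_struct *wq;
	struct mutex lock;
};

static int obj_open(struct inode *inode, struct file *filp)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return -ENOMEM;

	o->wq = create_singlethread_workqueue("obj_wq");
	if (!o->wq) {			/* nothing else to unwind yet */
		kfree(o);
		return -ENOMEM;
	}

	mutex_init(&o->lock);		/* infallible init comes after */
	filp->private_data = o;		/* publish only a fully built object */
	return 0;
}
```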