Commit 1424c3e3 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "The usual collection of small driver bug fixes:

   - Fix error unwind bugs in hfi1, irdma and rtrs

   - Old bug with IPoIB children interfaces possibly using the wrong
     number of queues

   - Really old bug in usnic calling iommu_map in an atomic context

   - Recent regression from the DMABUF locking rework

   - Missing user data validation in MANA"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/rtrs: Don't call kobject_del for srv_path->kobj
  RDMA/mana_ib: Prevent array underflow in mana_ib_create_qp_raw()
  IB/hfi1: Assign npages earlier
  RDMA/umem: Use dma-buf locked API to solve deadlock
  RDMA/usnic: use iommu_map_atomic() under spin_lock()
  RDMA/irdma: Fix potential NULL-ptr-dereference
  IB/IPoIB: Fix legacy IPoIB due to wrong number of queues
  IB/hfi1: Restore allocated resources on failed copyout
parents e544a074 2de49fb1
@@ -26,8 +26,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
 	if (umem_dmabuf->sgt)
 		goto wait_fence;
 
-	sgt = dma_buf_map_attachment_unlocked(umem_dmabuf->attach,
-					      DMA_BIDIRECTIONAL);
+	sgt = dma_buf_map_attachment(umem_dmabuf->attach,
+				     DMA_BIDIRECTIONAL);
 	if (IS_ERR(sgt))
 		return PTR_ERR(sgt);
 
@@ -103,8 +103,8 @@ void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
 		umem_dmabuf->last_sg_trim = 0;
 	}
 
-	dma_buf_unmap_attachment_unlocked(umem_dmabuf->attach, umem_dmabuf->sgt,
-					  DMA_BIDIRECTIONAL);
+	dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
+				 DMA_BIDIRECTIONAL);
 
 	umem_dmabuf->sgt = NULL;
 }
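Context for the RDMA/umem change: ib_umem_dmabuf_map_pages() and ib_umem_dmabuf_unmap_pages() are called with the dma-buf reservation lock already held, so after the dma-buf locking rework switched them to the _unlocked wrappers (which take that lock themselves), they deadlocked. A minimal sketch of the calling convention the locked API assumes (illustrative, not driver code):

    /* the caller owns the reservation lock around map/unmap, so only
     * the locked dma-buf API may be used inside */
    dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
    err = ib_umem_dmabuf_map_pages(umem_dmabuf);
    dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);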
@@ -1318,12 +1318,15 @@ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
 		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
 		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
 				 sizeof(tinfo.tidcnt)))
-			return -EFAULT;
+			ret = -EFAULT;
 
 		addr = arg + offsetof(struct hfi1_tid_info, length);
-		if (copy_to_user((void __user *)addr, &tinfo.length,
+		if (!ret && copy_to_user((void __user *)addr, &tinfo.length,
 				 sizeof(tinfo.length)))
 			ret = -EFAULT;
+
+		if (ret)
+			hfi1_user_exp_rcv_invalid(fd, &tinfo);
 	}
 
 	return ret;
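The shape of the copyout fix above, reduced to a sketch (uaddr_a, uaddr_b and the copied values are placeholders): once the TID entries have been programmed, a failed copy_to_user() can no longer simply return, so the status is accumulated and the programmed entries are torn down before returning:

    int ret = 0;

    if (copy_to_user(uaddr_a, &val_a, sizeof(val_a)))
            ret = -EFAULT;
    if (!ret && copy_to_user(uaddr_b, &val_b, sizeof(val_b)))
            ret = -EFAULT;
    if (ret)
            /* undo the receive entries that setup already programmed */
            hfi1_user_exp_rcv_invalid(fd, &tinfo);
    return ret;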
@@ -160,16 +160,11 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
 static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
 {
 	int pinned;
-	unsigned int npages;
+	unsigned int npages = tidbuf->npages;
 	unsigned long vaddr = tidbuf->vaddr;
 	struct page **pages = NULL;
 	struct hfi1_devdata *dd = fd->uctxt->dd;
 
-	/* Get the number of pages the user buffer spans */
-	npages = num_user_pages(vaddr, tidbuf->length);
-	if (!npages)
-		return -EINVAL;
-
 	if (npages > fd->uctxt->expected_count) {
 		dd_dev_err(dd, "Expected buffer too big\n");
 		return -EINVAL;
@@ -196,7 +191,6 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
 		return pinned;
 	}
 	tidbuf->pages = pages;
-	tidbuf->npages = npages;
 	fd->tid_n_pinned += pinned;
 	return pinned;
 }
@@ -274,6 +268,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
 	mutex_init(&tidbuf->cover_mutex);
 	tidbuf->vaddr = tinfo->vaddr;
 	tidbuf->length = tinfo->length;
+	tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length);
 	tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
 			       GFP_KERNEL);
 	if (!tidbuf->psets) {
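Moving the num_user_pages() call into hfi1_user_exp_rcv_setup() means tidbuf->npages is valid before any failure path that consults it. For reference, a sketch of how such a helper typically counts the pages a user buffer spans (the driver's version may differ in detail):

    static unsigned int num_user_pages(unsigned long addr, unsigned long len)
    {
            const unsigned long spage = addr & PAGE_MASK;             /* first page */
            const unsigned long epage = (addr + len - 1) & PAGE_MASK; /* last page */

            return 1 + ((epage - spage) >> PAGE_SHIFT);
    }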
@@ -1722,6 +1722,9 @@ static int irdma_add_mqh_4(struct irdma_device *iwdev,
 			continue;
 
 		idev = in_dev_get(ip_dev);
+		if (!idev)
+			continue;
+
 		in_dev_for_each_ifa_rtnl(ifa, idev) {
 			ibdev_dbg(&iwdev->ibdev,
 				  "CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
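in_dev_get() returns the netdev's IPv4 configuration with a reference held, or NULL when the device has none, which is what the added check guards against. A sketch of the safe usage pattern (loop body elided):

    struct in_device *idev = in_dev_get(ip_dev); /* may be NULL */

    if (!idev)
            continue;       /* no IPv4 configuration on this netdev */
    in_dev_for_each_ifa_rtnl(ifa, idev) {
            /* ... per-address work ... */
    }
    in_dev_put(idev);       /* drop the reference taken above */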
@@ -289,7 +289,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
 	/* IB ports start with 1, MANA Ethernet ports start with 0 */
 	port = ucmd.port;
-	if (ucmd.port > mc->num_ports)
+	if (port < 1 || port > mc->num_ports)
 		return -EINVAL;
 
 	if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
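The underflow the new lower bound prevents: the user-supplied port number is used as a 1-based index into the adapter's port array, so a port of 0 would read one element before it (a sketch, assuming the usual mc->ports indexing):

    /* port is 1-based; without the "port < 1" check, a user-supplied
     * port of 0 would make this read mc->ports[-1] */
    ndev = mc->ports[port - 1];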
@@ -276,8 +276,8 @@ static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
 				size = pa_end - pa_start + PAGE_SIZE;
 				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
 					va_start, &pa_start, size, flags);
-				err = iommu_map(pd->domain, va_start, pa_start,
-						size, flags);
+				err = iommu_map_atomic(pd->domain, va_start,
+						       pa_start, size, flags);
 				if (err) {
 					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
 						va_start, &pa_start, size, err);
@@ -293,8 +293,8 @@ static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
 			size = pa - pa_start + PAGE_SIZE;
 			usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
 				va_start, &pa_start, size, flags);
-			err = iommu_map(pd->domain, va_start, pa_start,
-					size, flags);
+			err = iommu_map_atomic(pd->domain, va_start,
+					       pa_start, size, flags);
 			if (err) {
 				usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
 					va_start, &pa_start, size, err);
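Background on the usnic change: iommu_map() may sleep while allocating page-table memory, which is forbidden in the atomic context created by the spinlock this code runs under; iommu_map_atomic() performs the same mapping with non-sleeping (GFP_ATOMIC) allocations. A sketch of the constraint (lock name illustrative):

    spin_lock(&pd->lock);           /* atomic context from here on */
    /* iommu_map() could sleep here; the atomic variant must be used */
    err = iommu_map_atomic(pd->domain, va_start, pa_start, size, flags);
    spin_unlock(&pd->lock);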
@@ -2200,6 +2200,14 @@ int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
 		rn->attach_mcast = ipoib_mcast_attach;
 		rn->detach_mcast = ipoib_mcast_detach;
 		rn->hca = hca;
+
+		rc = netif_set_real_num_tx_queues(dev, 1);
+		if (rc)
+			goto out;
+
+		rc = netif_set_real_num_rx_queues(dev, 1);
+		if (rc)
+			goto out;
 	}
 
 	priv->rn_ops = dev->netdev_ops;
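Why the IPoIB fix is needed: the net_device is allocated multiqueue so enhanced (offloaded) IPoIB can drive several queues, but the legacy path services exactly one queue pair, so the real queue counts visible to the stack must be trimmed back to 1. A sketch of the relationship (allocation parameters illustrative):

    /* allocated with room for multiple queues ... */
    dev = alloc_netdev_mqs(sizeof(*priv), name, NET_NAME_UNKNOWN,
                           setup, num_tx_queues, num_rx_queues);
    /* ... but legacy IPoIB drives one, so shrink the advertised counts */
    rc = netif_set_real_num_tx_queues(dev, 1);
    if (!rc)
            rc = netif_set_real_num_rx_queues(dev, 1);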
@@ -312,9 +312,8 @@ void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path)
 
 	if (srv_path->kobj.state_in_sysfs) {
 		sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
-		kobject_del(&srv_path->kobj);
 		kobject_put(&srv_path->kobj);
+		rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
 	}
 
-	rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
 }
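The rtrs change works because the final kobject_put() already removes a kobject that is still in sysfs before invoking its release method, so the explicit kobject_del() before the put duplicates work the put does itself. Roughly what the cleanup path in lib/kobject.c does (a simplified sketch):

    /* inside kobject_cleanup(), reached from the last kobject_put() */
    const struct kobj_type *t = get_ktype(kobj);

    if (kobj->state_in_sysfs)
            kobject_del(kobj);      /* sysfs removal happens here anyway */
    if (t && t->release)
            t->release(kobj);       /* then the object is freed */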