Commit ab2aa486 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Nothing very exciting here, it has been a quiet cycle overall. Usual
  collection of small bug fixes:

   - irdma issues with CQ entries, VLAN completions and a mutex deadlock

   - Incorrect DCT packets in mlx5

   - Userspace triggered overflows in qib

   - Locking error in hfi

   - Typo in errno value in qib/hfi1

   - Double free in qedr

   - Leak of random kernel memory to userspace with a netlink callback"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/sa_query: Use strscpy_pad instead of memcpy to copy a string
  RDMA/irdma: Do not hold qos mutex twice on QP resume
  RDMA/irdma: Set VLAN in UD work completion correctly
  RDMA/mlx5: Initialize the ODP xarray when creating an ODP MR
  rdma/qedr: Fix crash due to redundant release of device's qp memory
  RDMA/rdmavt: Fix error code in rvt_create_qp()
  IB/hfi1: Fix abba locking issue with sc_disable()
  IB/qib: Protect from buffer overflow in struct qib_user_sdma_pkt fields
  RDMA/mlx5: Set user priority for DCT
  RDMA/irdma: Process extended CQ entries correctly
parents d25f2743 64733956
@@ -706,7 +706,8 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
 	/* Construct the family header first */
 	header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
-	memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
-	       LS_DEVICE_NAME_MAX);
+	strscpy_pad(header->device_name,
+		    dev_name(&query->port->agent->device->dev),
+		    LS_DEVICE_NAME_MAX);
 	header->port_num = query->port->port_num;
......
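The sa_query fix above swaps a memcpy() of an arbitrary-length device name into the fixed LS_DEVICE_NAME_MAX field for strscpy_pad(), which bounds the copy, NUL-terminates, and zero-fills the remainder of the buffer so stale kernel memory never reaches the netlink reply. Below is a minimal userspace sketch of those padding semantics; copy_padded() is a made-up stand-in for the kernel helper, which additionally returns -E2BIG on truncation.

```c
#include <stdio.h>
#include <string.h>

/*
 * Userspace sketch of the strscpy_pad() semantics used in the fix:
 * copy at most size-1 bytes, always NUL-terminate, and zero-fill the
 * rest of the destination so uninitialized bytes are never sent out.
 * Assumes size > 0.
 */
static void copy_padded(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size - 1);

	memcpy(dst, src, len);
	memset(dst + len, 0, size - len);	/* NUL terminator + padding */
}

int main(void)
{
	char device_name[64];	/* stand-in for a LS_DEVICE_NAME_MAX buffer */

	copy_padded(device_name, "mlx5_0", sizeof(device_name));
	printf("%s (last byte = %d)\n", device_name, device_name[63]);
	return 0;
}
```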
@@ -878,6 +878,7 @@ void sc_disable(struct send_context *sc)
 {
 	u64 reg;
 	struct pio_buf *pbuf;
+	LIST_HEAD(wake_list);
 
 	if (!sc)
 		return;
@@ -912,19 +913,21 @@ void sc_disable(struct send_context *sc)
 	spin_unlock(&sc->release_lock);
 
 	write_seqlock(&sc->waitlock);
-	while (!list_empty(&sc->piowait)) {
+	if (!list_empty(&sc->piowait))
+		list_move(&sc->piowait, &wake_list);
+	write_sequnlock(&sc->waitlock);
+	while (!list_empty(&wake_list)) {
 		struct iowait *wait;
 		struct rvt_qp *qp;
 		struct hfi1_qp_priv *priv;
 
-		wait = list_first_entry(&sc->piowait, struct iowait, list);
+		wait = list_first_entry(&wake_list, struct iowait, list);
 		qp = iowait_to_qp(wait);
 		priv = qp->priv;
 		list_del_init(&priv->s_iowait.list);
 		priv->s_iowait.lock = NULL;
 		hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
 	}
-	write_sequnlock(&sc->waitlock);
 
 	spin_unlock_irq(&sc->alloc_lock);
 }
......
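The sc_disable() change avoids an ABBA deadlock by splicing the waiters onto a local wake_list while waitlock is held and only calling hfi1_qp_wakeup() (which can take other locks) after the lock is dropped. Here is a small pthreads sketch of that collect-under-lock, wake-outside-lock pattern; waiter, piowait and wake_one() are illustrative names, not hfi1 code.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical waiter object; wake_one() stands in for hfi1_qp_wakeup(). */
struct waiter {
	int id;
	struct waiter *next;
};

static pthread_mutex_t waitlock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *piowait;		/* list protected by waitlock */

static void wake_one(struct waiter *w)
{
	/* May take other locks; must not run under waitlock. */
	printf("waking waiter %d\n", w->id);
	free(w);
}

static void drain_and_wake(void)
{
	struct waiter *wake_list;

	/* Detach the whole list while holding the lock... */
	pthread_mutex_lock(&waitlock);
	wake_list = piowait;
	piowait = NULL;
	pthread_mutex_unlock(&waitlock);

	/* ...then do the wakeups with no lock held, as sc_disable() now does. */
	while (wake_list) {
		struct waiter *w = wake_list;

		wake_list = w->next;
		wake_one(w);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct waiter *w = malloc(sizeof(*w));

		w->id = i;
		w->next = piowait;
		piowait = w;
	}
	drain_and_wake();
	return 0;
}
```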
@@ -1092,12 +1092,12 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
 		if (cq->avoid_mem_cflct) {
 			ext_cqe = (__le64 *)((u8 *)cqe + 32);
 			get_64bit_val(ext_cqe, 24, &qword7);
-			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
 		} else {
 			peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
 			ext_cqe = cq->cq_base[peek_head].buf;
 			get_64bit_val(ext_cqe, 24, &qword7);
-			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+			polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
 			if (!peek_head)
 				polarity ^= 1;
 		}
......
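The irdma polling fix reads the valid bit out of qword7, the extended-CQE word that was just fetched, rather than the stale qword3. FIELD_GET() is the kernel's mask-based bitfield extractor from <linux/bitfield.h>; the sketch below approximates it in userspace with a compiler builtin, using made-up mask values rather than irdma's real CQE layout.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Userspace approximation of the kernel's FIELD_GET(): shift the word down
 * to the mask's lowest set bit, then keep only the masked field.
 */
#define FIELD_GET_U64(mask, val) \
	(((val) & (mask)) >> __builtin_ctzll(mask))

/* Illustrative masks only; the real IRDMA_CQ_VALID layout lives in the driver. */
#define CQ_VALID_MASK	(1ULL << 63)
#define CQ_OP_MASK	(0x3fULL << 56)

int main(void)
{
	uint64_t qword7 = (1ULL << 63) | (0x2aULL << 56);

	printf("valid=%llu op=0x%llx\n",
	       (unsigned long long)FIELD_GET_U64(CQ_VALID_MASK, qword7),
	       (unsigned long long)FIELD_GET_U64(CQ_OP_MASK, qword7));
	return 0;
}
```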
@@ -3399,9 +3399,13 @@ static void irdma_process_cqe(struct ib_wc *entry,
 	}
 
 	if (cq_poll_info->ud_vlan_valid) {
-		entry->vlan_id = cq_poll_info->ud_vlan & VLAN_VID_MASK;
-		entry->wc_flags |= IB_WC_WITH_VLAN;
-		entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
+		u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
+
+		entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
+		if (vlan) {
+			entry->vlan_id = vlan;
+			entry->wc_flags |= IB_WC_WITH_VLAN;
+		}
 	} else {
 		entry->sl = 0;
 	}
......
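The UD completion fix treats ud_vlan as a full 802.1Q TCI: the low 12 bits (VLAN_VID_MASK) are the VLAN ID, the 3 bits above VLAN_PRIO_SHIFT are the priority copied into entry->sl, and IB_WC_WITH_VLAN is only reported when the VID is non-zero (a zero VID means a priority-only tag). A self-contained decode using the same constant values:

```c
#include <stdint.h>
#include <stdio.h>

/* Same values as the kernel's VLAN_VID_MASK / VLAN_PRIO_SHIFT. */
#define VLAN_VID_MASK	0x0fff
#define VLAN_PRIO_SHIFT	13

static void decode_tci(uint16_t tci)
{
	uint16_t vid = tci & VLAN_VID_MASK;
	uint8_t prio = tci >> VLAN_PRIO_SHIFT;

	/* VID 0 means "priority tag only": keep the priority, report no VLAN. */
	if (vid)
		printf("tci=0x%04x -> vlan %u, prio %u\n", tci, vid, prio);
	else
		printf("tci=0x%04x -> priority-only tag, prio %u\n", tci, prio);
}

int main(void)
{
	decode_tci(0xa064);	/* prio 5, VID 100 */
	decode_tci(0x6000);	/* prio 3, VID 0   */
	return 0;
}
```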
@@ -330,9 +330,11 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
 		tc_node->enable = true;
 		ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
-		if (ret)
+		if (ret) {
+			vsi->unregister_qset(vsi, tc_node);
 			goto reg_err;
+		}
 	}
 	ibdev_dbg(to_ibdev(vsi->dev),
 		  "WS: Using node %d which represents VSI %d TC %d\n",
 		  tc_node->index, vsi->vsi_idx, traffic_class);
@@ -350,6 +352,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
 	}
 	goto exit;
 
+reg_err:
+	irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
+	list_del(&tc_node->siblings);
+	irdma_free_node(vsi, tc_node);
 leaf_add_err:
 	if (list_empty(&vsi_node->child_list_head)) {
 		if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
@@ -369,11 +375,6 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
 exit:
 	mutex_unlock(&vsi->dev->ws_mutex);
 	return ret;
-
-reg_err:
-	mutex_unlock(&vsi->dev->ws_mutex);
-	irdma_ws_remove(vsi, user_pri);
-	return ret;
 }
 
 /**
......
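The irdma_ws_add() rework exists because the old reg_err path called irdma_ws_remove(), which takes ws_mutex while irdma_ws_add() is still holding it; kernel mutexes are not recursive, so resuming a suspended QP could deadlock. The fix tears the node down inline under the already-held mutex. Below is a toy pthreads illustration of the usual cure, a *_locked helper for callers that already own the lock; remove_node()/remove_node_locked() are invented names, not driver code.

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ws_mutex = PTHREAD_MUTEX_INITIALIZER;
static int node_count;

/* Does the real work; caller must already hold ws_mutex. */
static void remove_node_locked(void)
{
	node_count--;
	printf("removed node, %d left\n", node_count);
}

/* Public entry point: takes the lock itself. */
static void remove_node(void)
{
	pthread_mutex_lock(&ws_mutex);
	remove_node_locked();
	pthread_mutex_unlock(&ws_mutex);
}

static void add_node(int fail)
{
	pthread_mutex_lock(&ws_mutex);
	node_count++;
	if (fail) {
		/*
		 * Error unwind: we already hold ws_mutex, so we must use the
		 * _locked variant.  Calling remove_node() here would try to
		 * take ws_mutex again and self-deadlock, which is what the
		 * old reg_err path effectively did via irdma_ws_remove().
		 */
		remove_node_locked();
	}
	pthread_mutex_unlock(&ws_mutex);
}

int main(void)
{
	add_node(0);
	add_node(1);	/* exercises the inline unwind */
	remove_node();
	return 0;
}
```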
@@ -1339,7 +1339,6 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
 		goto err_2;
 	}
 	mr->mmkey.type = MLX5_MKEY_MR;
-	mr->desc_size = sizeof(struct mlx5_mtt);
 	mr->umem = umem;
 	set_mr_fields(dev, mr, umem->length, access_flags);
 	kvfree(in);
@@ -1533,6 +1532,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
 		ib_umem_release(&odp->umem);
 		return ERR_CAST(mr);
 	}
+	xa_init(&mr->implicit_children);
 	odp->private = mr;
 
 	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
......
@@ -4458,6 +4458,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
 		MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
 		MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
+		if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
+			MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
 
 		err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
 					   MLX5_ST_SZ_BYTES(create_dct_in), out,
......
@@ -455,6 +455,7 @@ struct qedr_qp {
 	/* synchronization objects used with iwarp ep */
 	struct kref refcnt;
 	struct completion iwarp_cm_comp;
+	struct completion qp_rel_comp;
 	unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
 };
......
@@ -83,7 +83,7 @@ static void qedr_iw_free_qp(struct kref *ref)
 {
 	struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
 
-	kfree(qp);
+	complete(&qp->qp_rel_comp);
 }
 
 static void
......
@@ -1357,6 +1357,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
 	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
 		kref_init(&qp->refcnt);
 		init_completion(&qp->iwarp_cm_comp);
+		init_completion(&qp->qp_rel_comp);
 	}
 
 	qp->pd = pd;
@@ -2857,8 +2858,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 
 	qedr_free_qp_resources(dev, qp, udata);
 
-	if (rdma_protocol_iwarp(&dev->ibdev, 1))
+	if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
 		qedr_iw_qp_rem_ref(&qp->ibqp);
+		wait_for_completion(&qp->qp_rel_comp);
+	}
 
 	return 0;
 }
......
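The qedr fix changes teardown ordering: the kref release callback no longer kfree()s the QP, it completes qp_rel_comp, and qedr_destroy_qp() drops its reference and then waits on that completion, so the QP cannot be released twice or freed while the iWARP CM still holds a reference. Below is a userspace sketch of that "drop the reference, then wait for the release side to signal" shape; the completion built from a mutex and condvar and the obj/obj_put() names are illustrative, not the driver's API.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Minimal completion built from a mutex + condvar. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* Illustrative refcounted object, playing the role of qedr_qp's kref. */
struct obj {
	atomic_int refcnt;
	struct completion rel_comp;
};

static void obj_put(struct obj *o)
{
	/* Last reference: signal the waiter instead of freeing here. */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		complete(&o->rel_comp);
}

int main(void)
{
	struct obj o;

	atomic_init(&o.refcnt, 2);	/* creator + one outstanding user */
	pthread_mutex_init(&o.rel_comp.lock, NULL);
	pthread_cond_init(&o.rel_comp.cond, NULL);
	o.rel_comp.done = 0;

	obj_put(&o);			/* e.g. the CM side drops its ref */
	obj_put(&o);			/* destroy path drops its own ref */
	wait_for_completion(&o.rel_comp);
	printf("all references gone, freeing is now safe\n");
	return 0;
}
```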
@@ -602,7 +602,7 @@ static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
 /*
  * How many pages in this iovec element?
  */
-static int qib_user_sdma_num_pages(const struct iovec *iov)
+static size_t qib_user_sdma_num_pages(const struct iovec *iov)
 {
 	const unsigned long addr = (unsigned long) iov->iov_base;
 	const unsigned long len = iov->iov_len;
@@ -658,7 +658,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
 static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
 				   struct qib_user_sdma_queue *pq,
 				   struct qib_user_sdma_pkt *pkt,
-				   unsigned long addr, int tlen, int npages)
+				   unsigned long addr, int tlen, size_t npages)
 {
 	struct page *pages[8];
 	int i, j;
@@ -722,7 +722,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
 	unsigned long idx;
 
 	for (idx = 0; idx < niov; idx++) {
-		const int npages = qib_user_sdma_num_pages(iov + idx);
+		const size_t npages = qib_user_sdma_num_pages(iov + idx);
 		const unsigned long addr = (unsigned long) iov[idx].iov_base;
 
 		ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
@@ -824,8 +824,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 		unsigned pktnw;
 		unsigned pktnwc;
 		int nfrags = 0;
-		int npages = 0;
-		int bytes_togo = 0;
+		size_t npages = 0;
+		size_t bytes_togo = 0;
 		int tiddma = 0;
 		int cfur;
@@ -885,7 +885,11 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 			}
 
 			npages += qib_user_sdma_num_pages(&iov[idx]);
 
-			bytes_togo += slen;
+			if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
+			    bytes_togo > type_max(typeof(pkt->bytes_togo))) {
+				ret = -EINVAL;
+				goto free_pbc;
+			}
 			pktnwc += slen >> 2;
 			idx++;
 			nfrags++;
@@ -904,8 +908,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 		}
 
 		if (frag_size) {
-			int tidsmsize, n;
-			size_t pktsize;
+			size_t tidsmsize, n, pktsize, sz, addrlimit;
 
 			n = npages*((2*PAGE_SIZE/frag_size)+1);
 			pktsize = struct_size(pkt, addr, n);
@@ -923,14 +926,24 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 			else
 				tidsmsize = 0;
 
-			pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
+			if (check_add_overflow(pktsize, tidsmsize, &sz)) {
+				ret = -EINVAL;
+				goto free_pbc;
+			}
+			pkt = kmalloc(sz, GFP_KERNEL);
 			if (!pkt) {
 				ret = -ENOMEM;
 				goto free_pbc;
 			}
 			pkt->largepkt = 1;
 			pkt->frag_size = frag_size;
-			pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
+			if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
+					       &addrlimit) ||
+			    addrlimit > type_max(typeof(pkt->addrlimit))) {
+				ret = -EINVAL;
+				goto free_pbc;
+			}
+			pkt->addrlimit = addrlimit;
 
 			if (tiddma) {
 				char *tidsm = (char *)pkt + pktsize;
......
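The qib changes widen the user-controlled page and byte counters to size_t and reject any sum that either wraps or no longer fits the narrower fields stored in struct qib_user_sdma_pkt, using check_add_overflow() and type_max(). A userspace sketch of the same guard built on the compiler builtin that the kernel macro wraps; the 16-bit destination field here is only an illustration of a narrower packet field.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Overflow-safe accumulation: add with __builtin_add_overflow() (what the
 * kernel's check_add_overflow() wraps) and refuse values that do not fit
 * the narrower destination field.
 */
static int add_checked(size_t *total, size_t slen, uint16_t *dst_field)
{
	size_t sum;

	if (__builtin_add_overflow(*total, slen, &sum) || sum > UINT16_MAX)
		return -1;	/* reject, like the new -EINVAL paths */

	*total = sum;
	*dst_field = (uint16_t)sum;	/* illustrative narrow field */
	return 0;
}

int main(void)
{
	size_t bytes_togo = 0;
	uint16_t field = 0;

	printf("small add: %d\n", add_checked(&bytes_togo, 4096, &field));
	printf("wrapping add: %d\n", add_checked(&bytes_togo, SIZE_MAX, &field));
	printf("too big for field: %d\n", add_checked(&bytes_togo, 100000, &field));
	return 0;
}
```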
@@ -1223,7 +1223,7 @@ int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
 	spin_lock(&rdi->n_qps_lock);
 	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
 		spin_unlock(&rdi->n_qps_lock);
-		ret = ENOMEM;
+		ret = -ENOMEM;
 		goto bail_ip;
 	}
......
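The rdmavt one-liner matters because kernel callers generally treat only negative returns as errors, so a positive ENOMEM from rvt_create_qp() would be mistaken for success. A tiny demonstration of how a positive errno slips past the usual ret < 0 check; do_create() is a made-up stand-in for the failing allocation path.

```c
#include <errno.h>
#include <stdio.h>

/* Hypothetical allocation step that fails. */
static int do_create(int buggy)
{
	/* The bug: returning ENOMEM (positive) instead of -ENOMEM. */
	return buggy ? ENOMEM : -ENOMEM;
}

int main(void)
{
	for (int buggy = 0; buggy <= 1; buggy++) {
		int ret = do_create(buggy);

		/* Typical kernel-style caller: only negative values are errors. */
		if (ret < 0)
			printf("ret=%d -> correctly treated as an error\n", ret);
		else
			printf("ret=%d -> failure silently treated as success\n", ret);
	}
	return 0;
}
```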