Commit 8fea9f8f authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "This includes a fix for a significant security miss in checking the
  RDMA_NLDEV_CMD_SYS_SET operation.

  Summary:

   - UAF in SRP

   - Error unwind failure in siw connection management

   - Missing error checks

   - NULL/ERR_PTR confusion in erdma

   - Possible string truncation in CMA configfs and mlx4

   - Data ordering issue in bnxt_re

   - Missing stats decrement on object destroy in bnxt_re

   - Mlx5 bugs in this merge window:
      * Incorrect access_flag in the new mkey cache
      * Missing unlock on error in flow steering
      * lockdep possible deadlock on new mkey cache destruction (Plus a
        fix for this too)

   - Don't leak kernel stack memory to userspace in the CM

   - Missing permission validation for RDMA_NLDEV_CMD_SYS_SET"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/core: Require admin capabilities to set system parameters
  RDMA/mlx5: Remove not-used cache disable flag
  RDMA/cma: Initialize ib_sa_multicast structure to 0 when join
  RDMA/mlx5: Fix mkey cache possible deadlock on cleanup
  RDMA/mlx5: Fix NULL string error
  RDMA/mlx5: Fix mutex unlocking on error flow for steering anchor creation
  RDMA/mlx5: Fix assigning access flags to cache mkeys
  IB/mlx4: Fix the size of a buffer in add_port_entries()
  RDMA/bnxt_re: Decrement resource stats correctly
  RDMA/bnxt_re: Fix the handling of control path response data
  RDMA/cma: Fix truncation compilation warning in make_cma_ports
  RDMA/erdma: Fix NULL pointer access in regmr_cmd
  RDMA/erdma: Fix error code in erdma_create_scatter_mtt()
  RDMA/uverbs: Fix typo of sizeof argument
  RDMA/cxgb4: Check skb value for failure to allocate
  RDMA/siw: Fix connection failure handling
  RDMA/srp: Do not call scsi_done() from srp_abort()
parents 82714078 c38d23a5
...@@ -4968,7 +4968,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, ...@@ -4968,7 +4968,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
int err = 0; int err = 0;
struct sockaddr *addr = (struct sockaddr *)&mc->addr; struct sockaddr *addr = (struct sockaddr *)&mc->addr;
struct net_device *ndev = NULL; struct net_device *ndev = NULL;
struct ib_sa_multicast ib; struct ib_sa_multicast ib = {};
enum ib_gid_type gid_type; enum ib_gid_type gid_type;
bool send_only; bool send_only;
......
...@@ -217,7 +217,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group, ...@@ -217,7 +217,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
return -ENOMEM; return -ENOMEM;
for (i = 0; i < ports_num; i++) { for (i = 0; i < ports_num; i++) {
char port_str[10]; char port_str[11];
ports[i].port_num = i + 1; ports[i].port_num = i + 1;
snprintf(port_str, sizeof(port_str), "%u", i + 1); snprintf(port_str, sizeof(port_str), "%u", i + 1);
......
...@@ -2529,6 +2529,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { ...@@ -2529,6 +2529,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
}, },
[RDMA_NLDEV_CMD_SYS_SET] = { [RDMA_NLDEV_CMD_SYS_SET] = {
.doit = nldev_set_sys_set_doit, .doit = nldev_set_sys_set_doit,
.flags = RDMA_NL_ADMIN_PERM,
}, },
[RDMA_NLDEV_CMD_STAT_SET] = { [RDMA_NLDEV_CMD_STAT_SET] = {
.doit = nldev_stat_set_doit, .doit = nldev_stat_set_doit,
......
...@@ -546,7 +546,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr, ...@@ -546,7 +546,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
if (hdr->in_words * 4 != count) if (hdr->in_words * 4 != count)
return -EINVAL; return -EINVAL;
if (count < method_elm->req_size + sizeof(hdr)) { if (count < method_elm->req_size + sizeof(*hdr)) {
/* /*
* rdma-core v18 and v19 have a bug where they send DESTROY_CQ * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
* with a 16 byte write instead of 24. Old kernels didn't * with a 16 byte write instead of 24. Old kernels didn't
......
...@@ -910,6 +910,10 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) ...@@ -910,6 +910,10 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
list_del(&qp->list); list_del(&qp->list);
mutex_unlock(&rdev->qp_lock); mutex_unlock(&rdev->qp_lock);
atomic_dec(&rdev->stats.res.qp_count); atomic_dec(&rdev->stats.res.qp_count);
if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
atomic_dec(&rdev->stats.res.rc_qp_count);
else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
atomic_dec(&rdev->stats.res.ud_qp_count);
ib_umem_release(qp->rumem); ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem); ib_umem_release(qp->sumem);
......
...@@ -665,7 +665,6 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, ...@@ -665,7 +665,6 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
blocked = cookie & RCFW_CMD_IS_BLOCKING; blocked = cookie & RCFW_CMD_IS_BLOCKING;
cookie &= RCFW_MAX_COOKIE_VALUE; cookie &= RCFW_MAX_COOKIE_VALUE;
crsqe = &rcfw->crsqe_tbl[cookie]; crsqe = &rcfw->crsqe_tbl[cookie];
crsqe->is_in_used = false;
if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED, if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED,
&rcfw->cmdq.flags), &rcfw->cmdq.flags),
...@@ -681,8 +680,14 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, ...@@ -681,8 +680,14 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
atomic_dec(&rcfw->timeout_send); atomic_dec(&rcfw->timeout_send);
if (crsqe->is_waiter_alive) { if (crsqe->is_waiter_alive) {
if (crsqe->resp) if (crsqe->resp) {
memcpy(crsqe->resp, qp_event, sizeof(*qp_event)); memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
/* Insert write memory barrier to ensure that
* response data is copied before clearing the
* flags
*/
smp_wmb();
}
if (!blocked) if (!blocked)
wait_cmds++; wait_cmds++;
} }
...@@ -694,6 +699,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, ...@@ -694,6 +699,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
if (!is_waiter_alive) if (!is_waiter_alive)
crsqe->resp = NULL; crsqe->resp = NULL;
crsqe->is_in_used = false;
hwq->cons += req_size; hwq->cons += req_size;
/* This is a case to handle below scenario - /* This is a case to handle below scenario -
......
...@@ -1965,6 +1965,9 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) ...@@ -1965,6 +1965,9 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
int win; int win;
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
if (!skb)
return -ENOMEM;
req = __skb_put_zero(skb, sizeof(*req)); req = __skb_put_zero(skb, sizeof(*req));
req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR)); req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
......
...@@ -133,8 +133,8 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp) ...@@ -133,8 +133,8 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr) static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
{ {
struct erdma_pd *pd = to_epd(mr->ibmr.pd); struct erdma_pd *pd = to_epd(mr->ibmr.pd);
u32 mtt_level = ERDMA_MR_MTT_0LEVEL;
struct erdma_cmdq_reg_mr_req req; struct erdma_cmdq_reg_mr_req req;
u32 mtt_level;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR); erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR);
...@@ -147,10 +147,9 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr) ...@@ -147,10 +147,9 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist); req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist);
mtt_level = mr->mem.mtt->level; mtt_level = mr->mem.mtt->level;
} }
} else { } else if (mr->type != ERDMA_MR_TYPE_DMA) {
memcpy(req.phy_addr, mr->mem.mtt->buf, memcpy(req.phy_addr, mr->mem.mtt->buf,
MTT_SIZE(mr->mem.page_cnt)); MTT_SIZE(mr->mem.page_cnt));
mtt_level = ERDMA_MR_MTT_0LEVEL;
} }
req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) | req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) |
...@@ -655,7 +654,7 @@ static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev, ...@@ -655,7 +654,7 @@ static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
mtt = kzalloc(sizeof(*mtt), GFP_KERNEL); mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
if (!mtt) if (!mtt)
return NULL; return ERR_PTR(-ENOMEM);
mtt->size = ALIGN(size, PAGE_SIZE); mtt->size = ALIGN(size, PAGE_SIZE);
mtt->buf = vzalloc(mtt->size); mtt->buf = vzalloc(mtt->size);
......
...@@ -223,7 +223,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num, ...@@ -223,7 +223,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
static int add_port_entries(struct mlx4_ib_dev *device, int port_num) static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
{ {
int i; int i;
char buff[11]; char buff[12];
struct mlx4_ib_iov_port *port = NULL; struct mlx4_ib_iov_port *port = NULL;
int ret = 0 ; int ret = 0 ;
struct ib_port_attr attr; struct ib_port_attr attr;
......
...@@ -2470,8 +2470,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)( ...@@ -2470,8 +2470,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
mlx5_steering_anchor_destroy_res(ft_prio); mlx5_steering_anchor_destroy_res(ft_prio);
put_flow_table: put_flow_table:
put_flow_table(dev, ft_prio, true); put_flow_table(dev, ft_prio, true);
mutex_unlock(&dev->flow_db->lock);
free_obj: free_obj:
mutex_unlock(&dev->flow_db->lock);
kfree(obj); kfree(obj);
return err; return err;
......
...@@ -2084,7 +2084,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd) ...@@ -2084,7 +2084,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
case MLX5_IB_MMAP_DEVICE_MEM: case MLX5_IB_MMAP_DEVICE_MEM:
return "Device Memory"; return "Device Memory";
default: default:
return NULL; return "Unknown";
} }
} }
......
...@@ -301,7 +301,8 @@ static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs) ...@@ -301,7 +301,8 @@ static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc) static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
{ {
set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd); set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0,
ent->dev->umrc.pd);
MLX5_SET(mkc, mkc, free, 1); MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, umr_en, 1); MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3); MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
...@@ -1024,19 +1025,26 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev) ...@@ -1024,19 +1025,26 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
if (!dev->cache.wq) if (!dev->cache.wq)
return; return;
cancel_delayed_work_sync(&dev->cache.remove_ent_dwork);
mutex_lock(&dev->cache.rb_lock); mutex_lock(&dev->cache.rb_lock);
for (node = rb_first(root); node; node = rb_next(node)) { for (node = rb_first(root); node; node = rb_next(node)) {
ent = rb_entry(node, struct mlx5_cache_ent, node); ent = rb_entry(node, struct mlx5_cache_ent, node);
xa_lock_irq(&ent->mkeys); xa_lock_irq(&ent->mkeys);
ent->disabled = true; ent->disabled = true;
xa_unlock_irq(&ent->mkeys); xa_unlock_irq(&ent->mkeys);
cancel_delayed_work_sync(&ent->dwork);
} }
mutex_unlock(&dev->cache.rb_lock);
/*
* After all entries are disabled and will not reschedule on WQ,
* flush it and all async commands.
*/
flush_workqueue(dev->cache.wq);
mlx5_mkey_cache_debugfs_cleanup(dev); mlx5_mkey_cache_debugfs_cleanup(dev);
mlx5_cmd_cleanup_async_ctx(&dev->async_ctx); mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
/* At this point all entries are disabled and have no concurrent work. */
mutex_lock(&dev->cache.rb_lock);
node = rb_first(root); node = rb_first(root);
while (node) { while (node) {
ent = rb_entry(node, struct mlx5_cache_ent, node); ent = rb_entry(node, struct mlx5_cache_ent, node);
......
...@@ -976,6 +976,7 @@ static void siw_accept_newconn(struct siw_cep *cep) ...@@ -976,6 +976,7 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_put(cep); siw_cep_put(cep);
new_cep->listen_cep = NULL; new_cep->listen_cep = NULL;
if (rv) { if (rv) {
siw_cancel_mpatimer(new_cep);
siw_cep_set_free(new_cep); siw_cep_set_free(new_cep);
goto error; goto error;
} }
...@@ -1100,9 +1101,12 @@ static void siw_cm_work_handler(struct work_struct *w) ...@@ -1100,9 +1101,12 @@ static void siw_cm_work_handler(struct work_struct *w)
/* /*
* Socket close before MPA request received. * Socket close before MPA request received.
*/ */
siw_dbg_cep(cep, "no mpareq: drop listener\n"); if (cep->listen_cep) {
siw_cep_put(cep->listen_cep); siw_dbg_cep(cep,
cep->listen_cep = NULL; "no mpareq: drop listener\n");
siw_cep_put(cep->listen_cep);
cep->listen_cep = NULL;
}
} }
} }
release_cep = 1; release_cep = 1;
...@@ -1227,7 +1231,11 @@ static void siw_cm_llp_data_ready(struct sock *sk) ...@@ -1227,7 +1231,11 @@ static void siw_cm_llp_data_ready(struct sock *sk)
if (!cep) if (!cep)
goto out; goto out;
siw_dbg_cep(cep, "state: %d\n", cep->state); siw_dbg_cep(cep, "cep state: %d, socket state %d\n",
cep->state, sk->sk_state);
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
switch (cep->state) { switch (cep->state) {
case SIW_EPSTATE_RDMA_MODE: case SIW_EPSTATE_RDMA_MODE:
......
...@@ -2784,7 +2784,6 @@ static int srp_abort(struct scsi_cmnd *scmnd) ...@@ -2784,7 +2784,6 @@ static int srp_abort(struct scsi_cmnd *scmnd)
u32 tag; u32 tag;
u16 ch_idx; u16 ch_idx;
struct srp_rdma_ch *ch; struct srp_rdma_ch *ch;
int ret;
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
...@@ -2798,19 +2797,14 @@ static int srp_abort(struct scsi_cmnd *scmnd) ...@@ -2798,19 +2797,14 @@ static int srp_abort(struct scsi_cmnd *scmnd)
shost_printk(KERN_ERR, target->scsi_host, shost_printk(KERN_ERR, target->scsi_host,
"Sending SRP abort for tag %#x\n", tag); "Sending SRP abort for tag %#x\n", tag);
if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
SRP_TSK_ABORT_TASK, NULL) == 0) SRP_TSK_ABORT_TASK, NULL) == 0) {
ret = SUCCESS;
else if (target->rport->state == SRP_RPORT_LOST)
ret = FAST_IO_FAIL;
else
ret = FAILED;
if (ret == SUCCESS) {
srp_free_req(ch, req, scmnd, 0); srp_free_req(ch, req, scmnd, 0);
scmnd->result = DID_ABORT << 16; return SUCCESS;
scsi_done(scmnd);
} }
if (target->rport->state == SRP_RPORT_LOST)
return FAST_IO_FAIL;
return ret; return FAILED;
} }
static int srp_reset_device(struct scsi_cmnd *scmnd) static int srp_reset_device(struct scsi_cmnd *scmnd)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment