Commit 89ea94a7 authored by Maor Gottlieb's avatar Maor Gottlieb Committed by Doug Ledford

IB/mlx5: Reset flow support for IB kernel ULPs

The driver exposes interfaces that directly relate to HW state.
Upon fatal error, consumers of these interfaces (ULPs) that rely
on completion of all their posted work-request could hang, thereby
introducing dependencies in shutdown order. To prevent this from
happening, we manage the relevant resources (CQs, QPs) that are used
by the device. Upon a fatal error, we now generate simulated
completions for outstanding WQEs that were not completed at the
time the HW was reset.

It includes invoking the completion event handler for all involved
CQs so that the ULPs will poll those CQs. When polled we return
simulated CQEs with IB_WC_WR_FLUSH_ERR return code enabling ULPs
to clean up their resources and not wait forever for completions
upon receiving remove_one.

The above change requires an extra check in the data path to make
sure that when device is in error state, the simulated CQEs will
be returned and no further WQEs will be posted.
Signed-off-by: default avatarMaor Gottlieb <maorg@mellanox.com>
Signed-off-by: default avatarLeon Romanovsky <leon@kernel.org>
Signed-off-by: default avatarDoug Ledford <dledford@redhat.com>
parent 7c2344c3
...@@ -424,6 +424,83 @@ static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe, ...@@ -424,6 +424,83 @@ static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
item->key = be32_to_cpu(cqe->mkey); item->key = be32_to_cpu(cqe->mkey);
} }
/* Generate simulated flush completions (IB_WC_WR_FLUSH_ERR) for every
 * outstanding send WQE on @qp, up to @num_entries total polled CQEs.
 * Walks the SQ via last_poll/w_list to preserve submission order and
 * advances the tail for each WQE consumed.  Updates *npolled in place.
 */
static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq = &qp->sq;
	unsigned int outstanding = wq->head - wq->tail;
	unsigned int slot;
	int polled = *npolled;
	int i;

	if (!outstanding)
		return;

	for (i = 0; i < outstanding && polled < num_entries; i++) {
		/* last_poll tracks the next WQE index in submission order */
		slot = wq->last_poll & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[slot];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wc->qp = &qp->ibqp;
		wq->tail++;
		wq->last_poll = wq->w_list[slot].next;
		polled++;
		wc++;
	}
	*npolled = polled;
}
/* Generate simulated flush completions (IB_WC_WR_FLUSH_ERR) for every
 * outstanding receive WQE on @qp, up to @num_entries total polled CQEs.
 * The RQ is consumed strictly in tail order.  Updates *npolled in place.
 */
static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq = &qp->rq;
	unsigned int outstanding = wq->head - wq->tail;
	int polled = *npolled;
	int i;

	if (!outstanding)
		return;

	for (i = 0; i < outstanding && polled < num_entries; i++, wc++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wc->qp = &qp->ibqp;
		wq->tail++;
		polled++;
	}
	*npolled = polled;
}
/* Software CQ poll used when the device is in internal-error state:
 * instead of reading HW CQEs, synthesize IB_WC_WR_FLUSH_ERR completions
 * for every outstanding WQE of every QP attached to @cq, so ULPs can
 * drain and clean up.  Caller holds cq->lock.  Sets *npolled to the
 * number of simulated completions written into @wc (at most
 * @num_entries).
 */
static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
struct ib_wc *wc, int *npolled)
{
struct mlx5_ib_qp *qp;
*npolled = 0;
/* Find uncompleted WQEs belonging to that cq and return simulated
 * (mimicked) completions for them.  Send queues first, then receive
 * queues; stop as soon as @num_entries entries are filled.
 */
list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
sw_send_comp(qp, num_entries, wc + *npolled, npolled);
if (*npolled >= num_entries)
return;
}
list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
if (*npolled >= num_entries)
return;
}
}
static int mlx5_poll_one(struct mlx5_ib_cq *cq, static int mlx5_poll_one(struct mlx5_ib_cq *cq,
struct mlx5_ib_qp **cur_qp, struct mlx5_ib_qp **cur_qp,
struct ib_wc *wc) struct ib_wc *wc)
...@@ -594,12 +671,18 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) ...@@ -594,12 +671,18 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{ {
struct mlx5_ib_cq *cq = to_mcq(ibcq); struct mlx5_ib_cq *cq = to_mcq(ibcq);
struct mlx5_ib_qp *cur_qp = NULL; struct mlx5_ib_qp *cur_qp = NULL;
struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
struct mlx5_core_dev *mdev = dev->mdev;
unsigned long flags; unsigned long flags;
int soft_polled = 0; int soft_polled = 0;
int npolled; int npolled;
int err = 0; int err = 0;
spin_lock_irqsave(&cq->lock, flags); spin_lock_irqsave(&cq->lock, flags);
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
goto out;
}
if (unlikely(!list_empty(&cq->wc_list))) if (unlikely(!list_empty(&cq->wc_list)))
soft_polled = poll_soft_wc(cq, num_entries, wc); soft_polled = poll_soft_wc(cq, num_entries, wc);
...@@ -612,7 +695,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) ...@@ -612,7 +695,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
if (npolled) if (npolled)
mlx5_cq_set_ci(&cq->mcq); mlx5_cq_set_ci(&cq->mcq);
out:
spin_unlock_irqrestore(&cq->lock, flags); spin_unlock_irqrestore(&cq->lock, flags);
if (err == 0 || err == -EAGAIN) if (err == 0 || err == -EAGAIN)
...@@ -843,6 +926,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, ...@@ -843,6 +926,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
cq->resize_buf = NULL; cq->resize_buf = NULL;
cq->resize_umem = NULL; cq->resize_umem = NULL;
cq->create_flags = attr->flags; cq->create_flags = attr->flags;
INIT_LIST_HEAD(&cq->list_send_qp);
INIT_LIST_HEAD(&cq->list_recv_qp);
if (context) { if (context) {
err = create_cq_user(dev, udata, context, cq, entries, err = create_cq_user(dev, udata, context, cq, entries,
......
...@@ -1980,6 +1980,65 @@ static void pkey_change_handler(struct work_struct *work) ...@@ -1980,6 +1980,65 @@ static void pkey_change_handler(struct work_struct *work)
mutex_unlock(&ports->devr->mutex); mutex_unlock(&ports->devr->mutex);
} }
/* Fatal-error (reset flow) handler: walk every QP on @ibdev and, for each
 * QP that still has unfinished work (head != tail on SQ/RQ), collect its
 * CQ onto a local list (deduplicated via mcq.reset_notify_added) and then
 * invoke each collected CQ's completion handler.  This wakes the ULPs so
 * they poll the CQ and receive the simulated IB_WC_WR_FLUSH_ERR CQEs.
 * QPs whose receive side is backed by an SRQ are skipped on the RQ pass.
 * All traversal happens under reset_flow_resource_lock to synchronize
 * against QP create/destroy; each queue and CQ lock is taken briefly to
 * get a consistent head/tail snapshot.
 */
static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
{
struct mlx5_ib_qp *mqp;
struct mlx5_ib_cq *send_mcq, *recv_mcq;
struct mlx5_core_cq *mcq;
struct list_head cq_armed_list;
unsigned long flags_qp;
unsigned long flags_cq;
unsigned long flags;
INIT_LIST_HEAD(&cq_armed_list);
/* Go over qp list reside on that ibdev, sync with create/destroy qp.*/
spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
spin_lock_irqsave(&mqp->sq.lock, flags_qp);
if (mqp->sq.tail != mqp->sq.head) {
/* SQ has outstanding WQEs: queue its CQ for notification */
send_mcq = to_mcq(mqp->ibqp.send_cq);
spin_lock_irqsave(&send_mcq->lock, flags_cq);
if (send_mcq->mcq.comp &&
mqp->ibqp.send_cq->comp_handler) {
/* reset_notify_added dedups CQs shared by several QPs */
if (!send_mcq->mcq.reset_notify_added) {
send_mcq->mcq.reset_notify_added = 1;
list_add_tail(&send_mcq->mcq.reset_notify,
&cq_armed_list);
}
}
spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
}
spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
spin_lock_irqsave(&mqp->rq.lock, flags_qp);
/* no handling is needed for SRQ */
if (!mqp->ibqp.srq) {
if (mqp->rq.tail != mqp->rq.head) {
/* RQ has outstanding WQEs: queue its CQ for notification */
recv_mcq = to_mcq(mqp->ibqp.recv_cq);
spin_lock_irqsave(&recv_mcq->lock, flags_cq);
if (recv_mcq->mcq.comp &&
mqp->ibqp.recv_cq->comp_handler) {
if (!recv_mcq->mcq.reset_notify_added) {
recv_mcq->mcq.reset_notify_added = 1;
list_add_tail(&recv_mcq->mcq.reset_notify,
&cq_armed_list);
}
}
spin_unlock_irqrestore(&recv_mcq->lock,
flags_cq);
}
}
spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
}
/* At this point all in-flight post-sends have completed or been
 * fenced by the lock/unlock sequence above.  Now invoke the
 * completion handler of every involved CQ so the ULPs poll them.
 */
list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
mcq->comp(mcq);
}
spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}
static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param) enum mlx5_dev_event event, unsigned long param)
{ {
...@@ -1992,6 +2051,7 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, ...@@ -1992,6 +2051,7 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
case MLX5_DEV_EVENT_SYS_ERROR: case MLX5_DEV_EVENT_SYS_ERROR:
ibdev->ib_active = false; ibdev->ib_active = false;
ibev.event = IB_EVENT_DEVICE_FATAL; ibev.event = IB_EVENT_DEVICE_FATAL;
mlx5_ib_handle_internal_error(ibdev);
break; break;
case MLX5_DEV_EVENT_PORT_UP: case MLX5_DEV_EVENT_PORT_UP:
...@@ -2595,6 +2655,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) ...@@ -2595,6 +2655,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
mutex_init(&dev->flow_db.lock); mutex_init(&dev->flow_db.lock);
mutex_init(&dev->cap_mask_mutex); mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
spin_lock_init(&dev->reset_flow_resource_lock);
if (ll == IB_LINK_LAYER_ETHERNET) { if (ll == IB_LINK_LAYER_ETHERNET) {
err = mlx5_enable_roce(dev); err = mlx5_enable_roce(dev);
......
...@@ -380,6 +380,9 @@ struct mlx5_ib_qp { ...@@ -380,6 +380,9 @@ struct mlx5_ib_qp {
spinlock_t disable_page_faults_lock; spinlock_t disable_page_faults_lock;
struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS]; struct mlx5_ib_pfault pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
#endif #endif
struct list_head qps_list;
struct list_head cq_recv_list;
struct list_head cq_send_list;
}; };
struct mlx5_ib_cq_buf { struct mlx5_ib_cq_buf {
...@@ -441,6 +444,8 @@ struct mlx5_ib_cq { ...@@ -441,6 +444,8 @@ struct mlx5_ib_cq {
struct mlx5_ib_cq_buf *resize_buf; struct mlx5_ib_cq_buf *resize_buf;
struct ib_umem *resize_umem; struct ib_umem *resize_umem;
int cqe_size; int cqe_size;
struct list_head list_send_qp;
struct list_head list_recv_qp;
u32 create_flags; u32 create_flags;
struct list_head wc_list; struct list_head wc_list;
enum ib_cq_notify_flags notify_flags; enum ib_cq_notify_flags notify_flags;
...@@ -621,6 +626,9 @@ struct mlx5_ib_dev { ...@@ -621,6 +626,9 @@ struct mlx5_ib_dev {
struct srcu_struct mr_srcu; struct srcu_struct mr_srcu;
#endif #endif
struct mlx5_ib_flow_db flow_db; struct mlx5_ib_flow_db flow_db;
/* protect resources needed as part of reset flow */
spinlock_t reset_flow_resource_lock;
struct list_head qp_list;
}; };
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
......
...@@ -1193,12 +1193,16 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, ...@@ -1193,12 +1193,16 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{ {
struct mlx5_core_dev *mdev = dev->mdev;
struct umr_common *umrc = &dev->umrc; struct umr_common *umrc = &dev->umrc;
struct mlx5_ib_umr_context umr_context; struct mlx5_ib_umr_context umr_context;
struct mlx5_umr_wr umrwr = {}; struct mlx5_umr_wr umrwr = {};
struct ib_send_wr *bad; struct ib_send_wr *bad;
int err; int err;
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
return 0;
mlx5_ib_init_umr_context(&umr_context); mlx5_ib_init_umr_context(&umr_context);
umrwr.wr.wr_cqe = &umr_context.cqe; umrwr.wr.wr_cqe = &umr_context.cqe;
......
...@@ -77,6 +77,10 @@ struct mlx5_wqe_eth_pad { ...@@ -77,6 +77,10 @@ struct mlx5_wqe_eth_pad {
u8 rsvd0[16]; u8 rsvd0[16];
}; };
static void get_cqs(enum ib_qp_type qp_type,
struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
static int is_qp0(enum ib_qp_type qp_type) static int is_qp0(enum ib_qp_type qp_type)
{ {
return qp_type == IB_QPT_SMI; return qp_type == IB_QPT_SMI;
...@@ -609,6 +613,11 @@ static int to_mlx5_st(enum ib_qp_type type) ...@@ -609,6 +613,11 @@ static int to_mlx5_st(enum ib_qp_type type)
} }
} }
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
struct mlx5_ib_cq *recv_cq);
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
struct mlx5_ib_cq *recv_cq);
static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn) static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{ {
return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index; return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
...@@ -1457,6 +1466,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, ...@@ -1457,6 +1466,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_create_qp_resp resp; struct mlx5_ib_create_qp_resp resp;
struct mlx5_create_qp_mbox_in *in; struct mlx5_create_qp_mbox_in *in;
struct mlx5_ib_create_qp ucmd; struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_cq *send_cq;
struct mlx5_ib_cq *recv_cq;
unsigned long flags;
int inlen = sizeof(*in); int inlen = sizeof(*in);
int err; int err;
u32 uidx = MLX5_IB_DEFAULT_UIDX; u32 uidx = MLX5_IB_DEFAULT_UIDX;
...@@ -1714,6 +1726,23 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, ...@@ -1714,6 +1726,23 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
base->container_mibqp = qp; base->container_mibqp = qp;
base->mqp.event = mlx5_ib_qp_event; base->mqp.event = mlx5_ib_qp_event;
get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
&send_cq, &recv_cq);
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx5_ib_lock_cqs(send_cq, recv_cq);
/* Maintain device to QPs access, needed for further handling via reset
* flow
*/
list_add_tail(&qp->qps_list, &dev->qp_list);
/* Maintain CQ to QPs access, needed for further handling via reset flow
*/
if (send_cq)
list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
if (recv_cq)
list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
mlx5_ib_unlock_cqs(send_cq, recv_cq);
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
return 0; return 0;
err_create: err_create:
...@@ -1732,23 +1761,23 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv ...@@ -1732,23 +1761,23 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv
if (send_cq) { if (send_cq) {
if (recv_cq) { if (recv_cq) {
if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
spin_lock_irq(&send_cq->lock); spin_lock(&send_cq->lock);
spin_lock_nested(&recv_cq->lock, spin_lock_nested(&recv_cq->lock,
SINGLE_DEPTH_NESTING); SINGLE_DEPTH_NESTING);
} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
spin_lock_irq(&send_cq->lock); spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock); __acquire(&recv_cq->lock);
} else { } else {
spin_lock_irq(&recv_cq->lock); spin_lock(&recv_cq->lock);
spin_lock_nested(&send_cq->lock, spin_lock_nested(&send_cq->lock,
SINGLE_DEPTH_NESTING); SINGLE_DEPTH_NESTING);
} }
} else { } else {
spin_lock_irq(&send_cq->lock); spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock); __acquire(&recv_cq->lock);
} }
} else if (recv_cq) { } else if (recv_cq) {
spin_lock_irq(&recv_cq->lock); spin_lock(&recv_cq->lock);
__acquire(&send_cq->lock); __acquire(&send_cq->lock);
} else { } else {
__acquire(&send_cq->lock); __acquire(&send_cq->lock);
...@@ -1763,21 +1792,21 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *re ...@@ -1763,21 +1792,21 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *re
if (recv_cq) { if (recv_cq) {
if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
spin_unlock(&recv_cq->lock); spin_unlock(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock); spin_unlock(&send_cq->lock);
} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
__release(&recv_cq->lock); __release(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock); spin_unlock(&send_cq->lock);
} else { } else {
spin_unlock(&send_cq->lock); spin_unlock(&send_cq->lock);
spin_unlock_irq(&recv_cq->lock); spin_unlock(&recv_cq->lock);
} }
} else { } else {
__release(&recv_cq->lock); __release(&recv_cq->lock);
spin_unlock_irq(&send_cq->lock); spin_unlock(&send_cq->lock);
} }
} else if (recv_cq) { } else if (recv_cq) {
__release(&send_cq->lock); __release(&send_cq->lock);
spin_unlock_irq(&recv_cq->lock); spin_unlock(&recv_cq->lock);
} else { } else {
__release(&recv_cq->lock); __release(&recv_cq->lock);
__release(&send_cq->lock); __release(&send_cq->lock);
...@@ -1789,17 +1818,18 @@ static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp) ...@@ -1789,17 +1818,18 @@ static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
return to_mpd(qp->ibqp.pd); return to_mpd(qp->ibqp.pd);
} }
static void get_cqs(struct mlx5_ib_qp *qp, static void get_cqs(enum ib_qp_type qp_type,
struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq) struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{ {
switch (qp->ibqp.qp_type) { switch (qp_type) {
case IB_QPT_XRC_TGT: case IB_QPT_XRC_TGT:
*send_cq = NULL; *send_cq = NULL;
*recv_cq = NULL; *recv_cq = NULL;
break; break;
case MLX5_IB_QPT_REG_UMR: case MLX5_IB_QPT_REG_UMR:
case IB_QPT_XRC_INI: case IB_QPT_XRC_INI:
*send_cq = to_mcq(qp->ibqp.send_cq); *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
*recv_cq = NULL; *recv_cq = NULL;
break; break;
...@@ -1811,8 +1841,8 @@ static void get_cqs(struct mlx5_ib_qp *qp, ...@@ -1811,8 +1841,8 @@ static void get_cqs(struct mlx5_ib_qp *qp,
case IB_QPT_RAW_IPV6: case IB_QPT_RAW_IPV6:
case IB_QPT_RAW_ETHERTYPE: case IB_QPT_RAW_ETHERTYPE:
case IB_QPT_RAW_PACKET: case IB_QPT_RAW_PACKET:
*send_cq = to_mcq(qp->ibqp.send_cq); *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
*recv_cq = to_mcq(qp->ibqp.recv_cq); *recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
break; break;
case IB_QPT_MAX: case IB_QPT_MAX:
...@@ -1831,6 +1861,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) ...@@ -1831,6 +1861,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
struct mlx5_ib_cq *send_cq, *recv_cq; struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_ib_qp_base *base = &qp->trans_qp.base; struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct mlx5_modify_qp_mbox_in *in; struct mlx5_modify_qp_mbox_in *in;
unsigned long flags;
int err; int err;
if (qp->ibqp.rwq_ind_tbl) { if (qp->ibqp.rwq_ind_tbl) {
...@@ -1861,17 +1892,28 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) ...@@ -1861,17 +1892,28 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
base->mqp.qpn); base->mqp.qpn);
} }
get_cqs(qp, &send_cq, &recv_cq); get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
&send_cq, &recv_cq);
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx5_ib_lock_cqs(send_cq, recv_cq);
/* del from lists under both locks above to protect reset flow paths */
list_del(&qp->qps_list);
if (send_cq)
list_del(&qp->cq_send_list);
if (recv_cq)
list_del(&qp->cq_recv_list);
if (qp->create_type == MLX5_QP_KERNEL) { if (qp->create_type == MLX5_QP_KERNEL) {
mlx5_ib_lock_cqs(send_cq, recv_cq);
__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn, __mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (send_cq != recv_cq) if (send_cq != recv_cq)
__mlx5_ib_cq_clean(send_cq, base->mqp.qpn, __mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
NULL); NULL);
mlx5_ib_unlock_cqs(send_cq, recv_cq);
} }
mlx5_ib_unlock_cqs(send_cq, recv_cq);
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) { if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
destroy_raw_packet_qp(dev, qp); destroy_raw_packet_qp(dev, qp);
...@@ -2559,7 +2601,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, ...@@ -2559,7 +2601,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
} }
pd = get_pd(qp); pd = get_pd(qp);
get_cqs(qp, &send_cq, &recv_cq); get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
&send_cq, &recv_cq);
context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn); context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0; context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
...@@ -3658,6 +3701,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -3658,6 +3701,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
{ {
struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */ struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
struct mlx5_ib_dev *dev = to_mdev(ibqp->device); struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_qp *qp; struct mlx5_ib_qp *qp;
struct mlx5_ib_mr *mr; struct mlx5_ib_mr *mr;
struct mlx5_wqe_data_seg *dpseg; struct mlx5_wqe_data_seg *dpseg;
...@@ -3685,6 +3729,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ...@@ -3685,6 +3729,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->sq.lock, flags); spin_lock_irqsave(&qp->sq.lock, flags);
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
goto out;
}
for (nreq = 0; wr; nreq++, wr = wr->next) { for (nreq = 0; wr; nreq++, wr = wr->next) {
if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) { if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
mlx5_ib_warn(dev, "\n"); mlx5_ib_warn(dev, "\n");
...@@ -3986,6 +4037,8 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -3986,6 +4037,8 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct mlx5_ib_qp *qp = to_mqp(ibqp); struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_wqe_data_seg *scat; struct mlx5_wqe_data_seg *scat;
struct mlx5_rwqe_sig *sig; struct mlx5_rwqe_sig *sig;
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_core_dev *mdev = dev->mdev;
unsigned long flags; unsigned long flags;
int err = 0; int err = 0;
int nreq; int nreq;
...@@ -3997,6 +4050,13 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, ...@@ -3997,6 +4050,13 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qp->rq.lock, flags); spin_lock_irqsave(&qp->rq.lock, flags);
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
err = -EIO;
*bad_wr = wr;
nreq = 0;
goto out;
}
ind = qp->rq.head & (qp->rq.wqe_cnt - 1); ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
for (nreq = 0; wr; nreq++, wr = wr->next) { for (nreq = 0; wr; nreq++, wr = wr->next) {
......
...@@ -458,6 +458,8 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, ...@@ -458,6 +458,8 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
struct mlx5_ib_srq *srq = to_msrq(ibsrq); struct mlx5_ib_srq *srq = to_msrq(ibsrq);
struct mlx5_wqe_srq_next_seg *next; struct mlx5_wqe_srq_next_seg *next;
struct mlx5_wqe_data_seg *scat; struct mlx5_wqe_data_seg *scat;
struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
struct mlx5_core_dev *mdev = dev->mdev;
unsigned long flags; unsigned long flags;
int err = 0; int err = 0;
int nreq; int nreq;
...@@ -465,6 +467,12 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, ...@@ -465,6 +467,12 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
spin_lock_irqsave(&srq->lock, flags); spin_lock_irqsave(&srq->lock, flags);
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
err = -EIO;
*bad_wr = wr;
goto out;
}
for (nreq = 0; wr; nreq++, wr = wr->next) { for (nreq = 0; wr; nreq++, wr = wr->next) {
if (unlikely(wr->num_sge > srq->msrq.max_gs)) { if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
err = -EINVAL; err = -EINVAL;
...@@ -507,7 +515,7 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, ...@@ -507,7 +515,7 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
*srq->db.db = cpu_to_be32(srq->wqe_ctr); *srq->db.db = cpu_to_be32(srq->wqe_ctr);
} }
out:
spin_unlock_irqrestore(&srq->lock, flags); spin_unlock_irqrestore(&srq->lock, flags);
return err; return err;
......
...@@ -58,6 +58,8 @@ struct mlx5_core_cq { ...@@ -58,6 +58,8 @@ struct mlx5_core_cq {
void (*comp)(struct mlx5_core_cq *); void (*comp)(struct mlx5_core_cq *);
void *priv; void *priv;
} tasklet_ctx; } tasklet_ctx;
int reset_notify_added;
struct list_head reset_notify;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment