Commit b5f3fe27 authored by Guoqing Jiang, committed by Jason Gunthorpe

RDMA/rxe: Convert spin_{lock_bh,unlock_bh} to spin_{lock_irqsave,unlock_irqrestore}

We need to call spin_lock_irqsave()/spin_unlock_irqrestore() for
state_lock in rxe, otherwise the call chain:

  ib_post_send_mad
	-> spin_lock_irqsave
	-> ib_post_send -> rxe_post_send
				-> spin_lock_bh
				-> spin_unlock_bh
	-> spin_unlock_irqrestore

causes the traces below when running the nvmeof-mp/001 block test, due to the
mismatched spinlock nesting (a minimal sketch of the problem follows the traces):

  WARNING: CPU: 0 PID: 94794 at kernel/softirq.c:376 __local_bh_enable_ip+0xc2/0x140
  [ ... ]
  CPU: 0 PID: 94794 Comm: kworker/u4:1 Tainted: G            E      6.4.0-rc1 #9
  Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.15.0-0-g2dd4b9b-rebuilt.opensuse.org 04/01/2014
  Workqueue: rdma_cm cma_work_handler [rdma_cm]
  RIP: 0010:__local_bh_enable_ip+0xc2/0x140
  Code: 48 85 c0 74 72 5b 41 5c 5d 31 c0 89 c2 89 c1 89 c6 89 c7 41 89 c0 e9 bd 0e 11 01 65 8b 05 f2 65 72 48 85 c0 0f 85 76 ff ff ff <0f> 0b e9 6f ff ff ff e8 d2 39 1c 00 eb 80 4c 89 e7 e8 68 ad 0a 00
  RSP: 0018:ffffb7cf818539f0 EFLAGS: 00010046
  RAX: 0000000000000000 RBX: 0000000000000201 RCX: 0000000000000000
  RDX: 0000000000000000 RSI: 0000000000000201 RDI: ffffffffc0f25f79
  RBP: ffffb7cf81853a00 R08: 0000000000000000 R09: 0000000000000000
  R10: 0000000000000000 R11: 0000000000000000 R12: ffffffffc0f25f79
  R13: ffff8db1f0fa6000 R14: ffff8db2c63ff000 R15: 00000000000000e8
  FS:  0000000000000000(0000) GS:ffff8db33bc00000(0000) knlGS:0000000000000000
  CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
  CR2: 0000559758db0f20 CR3: 0000000105124000 CR4: 00000000003506f0
  Call Trace:
   <TASK>
   _raw_spin_unlock_bh+0x31/0x40
   rxe_post_send+0x59/0x8b0 [rdma_rxe]
   ib_send_mad+0x26b/0x470 [ib_core]
   ib_post_send_mad+0x150/0xb40 [ib_core]
   ? cm_form_tid+0x5b/0x90 [ib_cm]
   ib_send_cm_req+0x7c8/0xb70 [ib_cm]
   rdma_connect_locked+0x433/0x940 [rdma_cm]
   nvme_rdma_cm_handler+0x5d7/0x9c0 [nvme_rdma]
   cma_cm_event_handler+0x4f/0x170 [rdma_cm]
   cma_work_handler+0x6a/0xe0 [rdma_cm]
   process_one_work+0x2a9/0x580
   worker_thread+0x52/0x3f0
   ? __pfx_worker_thread+0x10/0x10
   kthread+0x109/0x140
   ? __pfx_kthread+0x10/0x10
   ret_from_fork+0x2c/0x50
   </TASK>


  raw_local_irq_restore() called with IRQs enabled
  WARNING: CPU: 0 PID: 94794 at kernel/locking/irqflag-debug.c:10 warn_bogus_irq_restore+0x37/0x60
  [ ... ]
  CPU: 0 PID: 94794 Comm: kworker/u4:1 Tainted: G        W   E      6.4.0-rc1 #9
  Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.15.0-0-g2dd4b9b-rebuilt.opensuse.org 04/01/2014
  Workqueue: rdma_cm cma_work_handler [rdma_cm]
  RIP: 0010:warn_bogus_irq_restore+0x37/0x60
  Code: fb 01 77 36 83 e3 01 74 0e 48 8b 5d f8 c9 31 f6 89 f7 e9 ac ea 01 00 48 c7 c7 e0 52 33 b9 c6 05 bb 1c 69 01 01 e8 39 24 f0 fe <0f> 0b 48 8b 5d f8 c9 31 f6 89 f7 e9 89 ea 01 00 0f b6 f3 48 c7 c7
  RSP: 0018:ffffb7cf81853a58 EFLAGS: 00010246
  RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
  RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000
  RBP: ffffb7cf81853a60 R08: 0000000000000000 R09: 0000000000000000
  R10: 0000000000000000 R11: 0000000000000000 R12: ffff8db2cfb1a9e8
  R13: ffff8db2cfb1a9d8 R14: ffff8db2c63ff000 R15: 0000000000000000
  FS:  0000000000000000(0000) GS:ffff8db33bc00000(0000) knlGS:0000000000000000
  CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
  CR2: 0000559758db0f20 CR3: 0000000105124000 CR4: 00000000003506f0
  Call Trace:
   <TASK>
   _raw_spin_unlock_irqrestore+0x91/0xa0
   ib_send_mad+0x1e3/0x470 [ib_core]
   ib_post_send_mad+0x150/0xb40 [ib_core]
   ? cm_form_tid+0x5b/0x90 [ib_cm]
   ib_send_cm_req+0x7c8/0xb70 [ib_cm]
   rdma_connect_locked+0x433/0x940 [rdma_cm]
   nvme_rdma_cm_handler+0x5d7/0x9c0 [nvme_rdma]
   cma_cm_event_handler+0x4f/0x170 [rdma_cm]
   cma_work_handler+0x6a/0xe0 [rdma_cm]
   process_one_work+0x2a9/0x580
   worker_thread+0x52/0x3f0
   ? __pfx_worker_thread+0x10/0x10
   kthread+0x109/0x140
   ? __pfx_kthread+0x10/0x10
   ret_from_fork+0x2c/0x50
   </TASK>
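
In other words, ib_post_send_mad() takes its own lock with spin_lock_irqsave(),
so hard IRQs are already disabled by the time rxe_post_send() takes and releases
qp->state_lock with the _bh variants. A minimal sketch of the broken nesting
(the outer lock name is illustrative, not the actual MAD code):

  unsigned long flags;

  spin_lock_irqsave(&outer_lock, flags);      /* caller: hard IRQs now off */

  spin_lock_bh(&qp->state_lock);              /* rxe: _bh variant */
  /* ... inspect or update QP state ... */
  spin_unlock_bh(&qp->state_lock);            /* __local_bh_enable_ip() warns:
                                               * BHs re-enabled while hard IRQs
                                               * are still disabled */

  spin_unlock_irqrestore(&outer_lock, flags); /* warn_bogus_irq_restore():
                                               * IRQs are no longer in the state
                                               * the caller saved */

Converting the state_lock users to spin_lock_irqsave()/spin_unlock_irqrestore()
keeps the IRQ state balanced regardless of the context the rxe verbs are called
from.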

Fixes: f605f26e ("RDMA/rxe: Protect QP state with qp->state_lock")
Link: https://lore.kernel.org/r/20230510035056.881196-1-guoqing.jiang@linux.dev
Signed-off-by: Guoqing Jiang <guoqing.jiang@linux.dev>
Reviewed-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 17eabd6a
@@ -115,15 +115,16 @@ static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
 void retransmit_timer(struct timer_list *t)
 {
         struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
+        unsigned long flags;
 
         rxe_dbg_qp(qp, "retransmit timer fired\n");
 
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         if (qp->valid) {
                 qp->comp.timeout = 1;
                 rxe_sched_task(&qp->comp.task);
         }
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
@@ -481,11 +482,13 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 
 static void comp_check_sq_drain_done(struct rxe_qp *qp)
 {
-        spin_lock_bh(&qp->state_lock);
+        unsigned long flags;
+
+        spin_lock_irqsave(&qp->state_lock, flags);
         if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
                 if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
                         qp->attr.sq_draining = 0;
-                        spin_unlock_bh(&qp->state_lock);
+                        spin_unlock_irqrestore(&qp->state_lock, flags);
 
                         if (qp->ibqp.event_handler) {
                                 struct ib_event ev;
@@ -499,7 +502,7 @@ static void comp_check_sq_drain_done(struct rxe_qp *qp)
                         return;
                 }
         }
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static inline enum comp_state complete_ack(struct rxe_qp *qp,
@@ -625,13 +628,15 @@ static void free_pkt(struct rxe_pkt_info *pkt)
  */
 static void reset_retry_timer(struct rxe_qp *qp)
 {
+        unsigned long flags;
+
         if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) {
-                spin_lock_bh(&qp->state_lock);
+                spin_lock_irqsave(&qp->state_lock, flags);
                 if (qp_state(qp) >= IB_QPS_RTS &&
                     psn_compare(qp->req.psn, qp->comp.psn) > 0)
                         mod_timer(&qp->retrans_timer,
                                   jiffies + qp->qp_timeout_jiffies);
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
         }
 }
 
@@ -643,18 +648,19 @@ int rxe_completer(struct rxe_qp *qp)
         struct rxe_pkt_info *pkt = NULL;
         enum comp_state state;
         int ret;
+        unsigned long flags;
 
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
             qp_state(qp) == IB_QPS_RESET) {
                 bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
 
                 drain_resp_pkts(qp);
                 flush_send_queue(qp, notify);
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
                 goto exit;
         }
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 
         if (qp->comp.timeout) {
                 qp->comp.timeout_retry = 1;
...
@@ -412,15 +412,16 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
         int err;
         int is_request = pkt->mask & RXE_REQ_MASK;
         struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+        unsigned long flags;
 
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         if ((is_request && (qp_state(qp) < IB_QPS_RTS)) ||
             (!is_request && (qp_state(qp) < IB_QPS_RTR))) {
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
                 rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
                 goto drop;
         }
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 
         rxe_icrc_generate(skb, pkt);
...
@@ -300,6 +300,7 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
         struct rxe_cq *rcq = to_rcq(init->recv_cq);
         struct rxe_cq *scq = to_rcq(init->send_cq);
         struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
+        unsigned long flags;
 
         rxe_get(pd);
         rxe_get(rcq);
@@ -325,10 +326,10 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
         if (err)
                 goto err2;
 
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         qp->attr.qp_state = IB_QPS_RESET;
         qp->valid = 1;
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 
         return 0;
 
@@ -492,24 +493,28 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 /* move the qp to the error state */
 void rxe_qp_error(struct rxe_qp *qp)
 {
-        spin_lock_bh(&qp->state_lock);
+        unsigned long flags;
+
+        spin_lock_irqsave(&qp->state_lock, flags);
         qp->attr.qp_state = IB_QPS_ERR;
 
         /* drain work and packet queues */
         rxe_sched_task(&qp->resp.task);
         rxe_sched_task(&qp->comp.task);
         rxe_sched_task(&qp->req.task);
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
                        int mask)
 {
-        spin_lock_bh(&qp->state_lock);
+        unsigned long flags;
+
+        spin_lock_irqsave(&qp->state_lock, flags);
         qp->attr.sq_draining = 1;
         rxe_sched_task(&qp->comp.task);
         rxe_sched_task(&qp->req.task);
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 /* caller should hold qp->state_lock */
@@ -555,14 +560,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
                 qp->attr.cur_qp_state = attr->qp_state;
 
         if (mask & IB_QP_STATE) {
-                spin_lock_bh(&qp->state_lock);
+                unsigned long flags;
+
+                spin_lock_irqsave(&qp->state_lock, flags);
                 err = __qp_chk_state(qp, attr, mask);
                 if (!err) {
                         qp->attr.qp_state = attr->qp_state;
                         rxe_dbg_qp(qp, "state -> %s\n",
                                         qps2str[attr->qp_state]);
                 }
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
 
                 if (err)
                         return err;
@@ -688,6 +695,8 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 /* called by the query qp verb */
 int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
 {
+        unsigned long flags;
+
         *attr = qp->attr;
 
         attr->rq_psn = qp->resp.psn;
@@ -708,12 +717,12 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
         /* Applications that get this state typically spin on it.
          * Yield the processor
          */
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         if (qp->attr.sq_draining) {
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
                 cond_resched();
         } else {
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
         }
 
         return 0;
@@ -737,10 +746,11 @@ int rxe_qp_chk_destroy(struct rxe_qp *qp)
 static void rxe_qp_do_cleanup(struct work_struct *work)
 {
         struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
+        unsigned long flags;
 
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         qp->valid = 0;
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
         qp->qp_timeout_jiffies = 0;
 
         if (qp_type(qp) == IB_QPT_RC) {
...
@@ -14,6 +14,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
                             struct rxe_qp *qp)
 {
         unsigned int pkt_type;
+        unsigned long flags;
 
         if (unlikely(!qp->valid))
                 return -EINVAL;
@@ -38,19 +39,19 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
                 return -EINVAL;
         }
 
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         if (pkt->mask & RXE_REQ_MASK) {
                 if (unlikely(qp_state(qp) < IB_QPS_RTR)) {
-                        spin_unlock_bh(&qp->state_lock);
+                        spin_unlock_irqrestore(&qp->state_lock, flags);
                         return -EINVAL;
                 }
         } else {
                 if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
-                        spin_unlock_bh(&qp->state_lock);
+                        spin_unlock_irqrestore(&qp->state_lock, flags);
                         return -EINVAL;
                 }
         }
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 
         return 0;
 }
...
@@ -99,17 +99,18 @@ static void req_retry(struct rxe_qp *qp)
 void rnr_nak_timer(struct timer_list *t)
 {
         struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
+        unsigned long flags;
 
         rxe_dbg_qp(qp, "nak timer fired\n");
 
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         if (qp->valid) {
                 /* request a send queue retry */
                 qp->req.need_retry = 1;
                 qp->req.wait_for_rnr_timer = 0;
                 rxe_sched_task(&qp->req.task);
         }
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static void req_check_sq_drain_done(struct rxe_qp *qp)
@@ -118,8 +119,9 @@ static void req_check_sq_drain_done(struct rxe_qp *qp)
         unsigned int index;
         unsigned int cons;
         struct rxe_send_wqe *wqe;
+        unsigned long flags;
 
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         if (qp_state(qp) == IB_QPS_SQD) {
                 q = qp->sq.queue;
                 index = qp->req.wqe_index;
@@ -140,7 +142,7 @@ static void req_check_sq_drain_done(struct rxe_qp *qp)
                                 break;
 
                         qp->attr.sq_draining = 0;
-                        spin_unlock_bh(&qp->state_lock);
+                        spin_unlock_irqrestore(&qp->state_lock, flags);
 
                         if (qp->ibqp.event_handler) {
                                 struct ib_event ev;
@@ -154,7 +156,7 @@ static void req_check_sq_drain_done(struct rxe_qp *qp)
                         return;
                 } while (0);
         }
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
@@ -173,6 +175,7 @@ static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
 static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 {
         struct rxe_send_wqe *wqe;
+        unsigned long flags;
 
         req_check_sq_drain_done(qp);
@@ -180,13 +183,13 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
         if (wqe == NULL)
                 return NULL;
 
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
                      (wqe->state != wqe_state_processing))) {
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
                 return NULL;
         }
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 
         wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
         return wqe;
@@ -676,16 +679,17 @@ int rxe_requester(struct rxe_qp *qp)
         struct rxe_queue *q = qp->sq.queue;
         struct rxe_ah *ah;
         struct rxe_av *av;
+        unsigned long flags;
 
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         if (unlikely(!qp->valid)) {
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
                 goto exit;
         }
 
         if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
                 wqe = __req_next_wqe(qp);
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
                 if (wqe)
                         goto err;
                 else
@@ -700,10 +704,10 @@ int rxe_requester(struct rxe_qp *qp)
                 qp->req.wait_psn = 0;
                 qp->req.need_retry = 0;
                 qp->req.wait_for_rnr_timer = 0;
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
                 goto exit;
         }
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 
         /* we come here if the retransmit timer has fired
          * or if the rnr timer has fired. If the retransmit
...
@@ -1047,6 +1047,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
         struct ib_uverbs_wc *uwc = &cqe.uibwc;
         struct rxe_recv_wqe *wqe = qp->resp.wqe;
         struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+        unsigned long flags;
 
         if (!wqe)
                 goto finish;
@@ -1137,12 +1138,12 @@ static enum resp_states do_complete(struct rxe_qp *qp,
                 return RESPST_ERR_CQ_OVERFLOW;
 
 finish:
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
                 return RESPST_CHK_RESOURCE;
         }
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 
         if (unlikely(!pkt))
                 return RESPST_DONE;
@@ -1468,18 +1469,19 @@ int rxe_responder(struct rxe_qp *qp)
         enum resp_states state;
         struct rxe_pkt_info *pkt = NULL;
         int ret;
+        unsigned long flags;
 
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
             qp_state(qp) == IB_QPS_RESET) {
                 bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
 
                 drain_req_pkts(qp);
                 flush_recv_queue(qp, notify);
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
                 goto exit;
         }
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 
         qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
...
@@ -904,10 +904,10 @@ static int rxe_post_send_kernel(struct rxe_qp *qp,
         if (!err)
                 rxe_sched_task(&qp->req.task);
 
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         if (qp_state(qp) == IB_QPS_ERR)
                 rxe_sched_task(&qp->comp.task);
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 
         return err;
 }
@@ -917,22 +917,23 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 {
         struct rxe_qp *qp = to_rqp(ibqp);
         int err;
+        unsigned long flags;
 
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         /* caller has already called destroy_qp */
         if (WARN_ON_ONCE(!qp->valid)) {
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
                 rxe_err_qp(qp, "qp has been destroyed");
                 return -EINVAL;
         }
 
         if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
                 *bad_wr = wr;
                 rxe_err_qp(qp, "qp not ready to send");
                 return -EINVAL;
         }
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 
         if (qp->is_user) {
                 /* Utilize process context to do protocol processing */
@@ -1008,22 +1009,22 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
         struct rxe_rq *rq = &qp->rq;
         unsigned long flags;
 
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         /* caller has already called destroy_qp */
         if (WARN_ON_ONCE(!qp->valid)) {
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
                 rxe_err_qp(qp, "qp has been destroyed");
                 return -EINVAL;
         }
 
         /* see C10-97.2.1 */
         if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
-                spin_unlock_bh(&qp->state_lock);
+                spin_unlock_irqrestore(&qp->state_lock, flags);
                 *bad_wr = wr;
                 rxe_dbg_qp(qp, "qp not ready to post recv");
                 return -EINVAL;
         }
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 
         if (unlikely(qp->srq)) {
                 *bad_wr = wr;
@@ -1044,10 +1045,10 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
         spin_unlock_irqrestore(&rq->producer_lock, flags);
 
-        spin_lock_bh(&qp->state_lock);
+        spin_lock_irqsave(&qp->state_lock, flags);
         if (qp_state(qp) == IB_QPS_ERR)
                 rxe_sched_task(&qp->resp.task);
-        spin_unlock_bh(&qp->state_lock);
+        spin_unlock_irqrestore(&qp->state_lock, flags);
 
         return err;
 }
...
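
Taken together, every qp->state_lock user in the driver now follows the same
pattern; a minimal sketch of the converted locking (not a literal excerpt from
any one function):

  unsigned long flags;

  spin_lock_irqsave(&qp->state_lock, flags);
  /* inspect or update the QP state */
  spin_unlock_irqrestore(&qp->state_lock, flags);

Because spin_lock_irqsave()/spin_unlock_irqrestore() saves and restores the
caller's IRQ state, the same code is safe whether it runs in process context,
in a timer callback, or under a caller that has already disabled interrupts.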