Commit 300ec741 authored by Bart Van Assche, committed by Martin K. Petersen

scsi: qla2xxx: Fix hardirq-unsafe locking

Since fc_remote_port_delete() must be called with interrupts enabled, do
not disable interrupts when calling that function. Remove the locking calls
from around the put_sess() call. This is safe because the function that is
called when the final reference is dropped, qlt_unreg_sess(), grabs the
proper locks itself. This patch prevents lockdep from reporting the following:

WARNING: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected
kworker/2:1/62 [HC0[0]:SC0[0]:HE0:SE1] is trying to acquire:
0000000009e679b3 (&(&k->k_lock)->rlock){+.+.}, at: klist_next+0x43/0x1d0

and this task is already holding:
00000000a033b71c (&(&ha->tgt.sess_lock)->rlock){-...}, at: qla24xx_delete_sess_fn+0x55/0xf0 [qla2xxx_scst]
which would create a new lock dependency:
 (&(&ha->tgt.sess_lock)->rlock){-...} -> (&(&k->k_lock)->rlock){+.+.}

but this new dependency connects a HARDIRQ-irq-safe lock:
 (&(&ha->tgt.sess_lock)->rlock){-...}

... which became HARDIRQ-irq-safe at:
  lock_acquire+0xe3/0x200
  _raw_spin_lock_irqsave+0x3d/0x60
  qla24xx_report_id_acquisition+0xa69/0xe30 [qla2xxx_scst]
  qla24xx_process_response_queue+0x69e/0x1270 [qla2xxx_scst]
  qla24xx_msix_rsp_q+0x79/0xf0 [qla2xxx_scst]
  __handle_irq_event_percpu+0x79/0x3c0
  handle_irq_event_percpu+0x70/0xf0
  handle_irq_event+0x5a/0x8b
  handle_edge_irq+0x12c/0x310
  handle_irq+0x192/0x20a
  do_IRQ+0x73/0x160
  ret_from_intr+0x0/0x1d
  default_idle+0x23/0x1f0
  arch_cpu_idle+0x15/0x20
  default_idle_call+0x35/0x40
  do_idle+0x2bb/0x2e0
  cpu_startup_entry+0x1d/0x20
  start_secondary+0x2a8/0x320
  secondary_startup_64+0xa4/0xb0

to a HARDIRQ-irq-unsafe lock:
 (&(&k->k_lock)->rlock){+.+.}

... which became HARDIRQ-irq-unsafe at:
...
  lock_acquire+0xe3/0x200
  _raw_spin_lock+0x32/0x50
  klist_add_tail+0x33/0xb0
  device_add+0x7e1/0xb50
  device_create_groups_vargs+0x11c/0x150
  device_create_with_groups+0x89/0xb0
  vtconsole_class_init+0xb2/0x124
  do_one_initcall+0xc5/0x3ce
  kernel_init_freeable+0x295/0x32e
  kernel_init+0x11/0x11b
  ret_from_fork+0x3a/0x50

other info that might help us debug this:

 Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(&(&k->k_lock)->rlock);
                               local_irq_disable();
                               lock(&(&ha->tgt.sess_lock)->rlock);
                               lock(&(&k->k_lock)->rlock);
  <Interrupt>
    lock(&(&ha->tgt.sess_lock)->rlock);

 *** DEADLOCK ***

3 locks held by kworker/2:1/62:
 #0: 00000000a4319c16 ((wq_completion)"qla2xxx_wq"){+.+.}, at: process_one_work+0x437/0xa80
 #1: 00000000ffa34c42 ((work_completion)(&sess->del_work)){+.+.}, at: process_one_work+0x437/0xa80
 #2: 00000000a033b71c (&(&ha->tgt.sess_lock)->rlock){-...}, at: qla24xx_delete_sess_fn+0x55/0xf0 [qla2xxx_scst]

the dependencies between HARDIRQ-irq-safe lock and the holding lock:
-> (&(&ha->tgt.sess_lock)->rlock){-...} ops: 8 {
   IN-HARDIRQ-W at:
                    lock_acquire+0xe3/0x200
                    _raw_spin_lock_irqsave+0x3d/0x60
                    qla24xx_report_id_acquisition+0xa69/0xe30 [qla2xxx_scst]
                    qla24xx_process_response_queue+0x69e/0x1270 [qla2xxx_scst]
                    qla24xx_msix_rsp_q+0x79/0xf0 [qla2xxx_scst]
                    __handle_irq_event_percpu+0x79/0x3c0
                    handle_irq_event_percpu+0x70/0xf0
                    handle_irq_event+0x5a/0x8b
                    handle_edge_irq+0x12c/0x310
                    handle_irq+0x192/0x20a
                    do_IRQ+0x73/0x160
                    ret_from_intr+0x0/0x1d
                    default_idle+0x23/0x1f0
                    arch_cpu_idle+0x15/0x20
                    default_idle_call+0x35/0x40
                    do_idle+0x2bb/0x2e0
                    cpu_startup_entry+0x1d/0x20
                    start_secondary+0x2a8/0x320
                    secondary_startup_64+0xa4/0xb0
   INITIAL USE at:
                   lock_acquire+0xe3/0x200
                   _raw_spin_lock_irqsave+0x3d/0x60
                   qla24xx_report_id_acquisition+0xa69/0xe30 [qla2xxx_scst]
                   qla24xx_process_response_queue+0x69e/0x1270 [qla2xxx_scst]
                   qla24xx_msix_rsp_q+0x79/0xf0 [qla2xxx_scst]
                   __handle_irq_event_percpu+0x79/0x3c0
                   handle_irq_event_percpu+0x70/0xf0
                   handle_irq_event+0x5a/0x8b
                   handle_edge_irq+0x12c/0x310
                   handle_irq+0x192/0x20a
                   do_IRQ+0x73/0x160
                   ret_from_intr+0x0/0x1d
                   default_idle+0x23/0x1f0
                   arch_cpu_idle+0x15/0x20
                   default_idle_call+0x35/0x40
                   do_idle+0x2bb/0x2e0
                   cpu_startup_entry+0x1d/0x20
                   start_secondary+0x2a8/0x320
                   secondary_startup_64+0xa4/0xb0
 }
 ... key      at: [<ffffffffa0c0d080>] __key.85462+0x0/0xfffffffffff7df80 [qla2xxx_scst]
 ... acquired at:
   lock_acquire+0xe3/0x200
   _raw_spin_lock_irqsave+0x3d/0x60
   klist_next+0x43/0x1d0
   device_for_each_child+0x96/0x110
   scsi_target_block+0x3c/0x40 [scsi_mod]
   fc_remote_port_delete+0xe7/0x1c0 [scsi_transport_fc]
   qla2x00_mark_device_lost+0xa0b/0xa30 [qla2xxx_scst]
   qlt_unreg_sess+0x1c6/0x380 [qla2xxx_scst]
   qla24xx_delete_sess_fn+0xe6/0xf0 [qla2xxx_scst]
   process_one_work+0x511/0xa80
   worker_thread+0x67/0x5b0
   kthread+0x1d2/0x1f0
   ret_from_fork+0x3a/0x50

the dependencies between the lock to be acquired
 and HARDIRQ-irq-unsafe lock:
-> (&(&k->k_lock)->rlock){+.+.} ops: 13831 {
   HARDIRQ-ON-W at:
                    lock_acquire+0xe3/0x200
                    _raw_spin_lock+0x32/0x50
                    klist_add_tail+0x33/0xb0
                    device_add+0x7e1/0xb50
                    device_create_groups_vargs+0x11c/0x150
                    device_create_with_groups+0x89/0xb0
                    vtconsole_class_init+0xb2/0x124
                    do_one_initcall+0xc5/0x3ce
                    kernel_init_freeable+0x295/0x32e
                    kernel_init+0x11/0x11b
                    ret_from_fork+0x3a/0x50
   SOFTIRQ-ON-W at:
                    lock_acquire+0xe3/0x200
                    _raw_spin_lock+0x32/0x50
                    klist_add_tail+0x33/0xb0
                    device_add+0x7e1/0xb50
                    device_create_groups_vargs+0x11c/0x150
                    device_create_with_groups+0x89/0xb0
                    vtconsole_class_init+0xb2/0x124
                    do_one_initcall+0xc5/0x3ce
                    kernel_init_freeable+0x295/0x32e
                    kernel_init+0x11/0x11b
                    ret_from_fork+0x3a/0x50
   INITIAL USE at:
                   lock_acquire+0xe3/0x200
                   _raw_spin_lock+0x32/0x50
                   klist_add_tail+0x33/0xb0
                   device_add+0x7e1/0xb50
                   device_create_groups_vargs+0x11c/0x150
                   device_create_with_groups+0x89/0xb0
                   vtconsole_class_init+0xb2/0x124
                   do_one_initcall+0xc5/0x3ce
                   kernel_init_freeable+0x295/0x32e
                   kernel_init+0x11/0x11b
                   ret_from_fork+0x3a/0x50
 }
 ... key      at: [<ffffffff83ed8780>] __key.15491+0x0/0x40
 ... acquired at:
   lock_acquire+0xe3/0x200
   _raw_spin_lock_irqsave+0x3d/0x60
   klist_next+0x43/0x1d0
   device_for_each_child+0x96/0x110
   scsi_target_block+0x3c/0x40 [scsi_mod]
   fc_remote_port_delete+0xe7/0x1c0 [scsi_transport_fc]
   qla2x00_mark_device_lost+0xa0b/0xa30 [qla2xxx_scst]
   qlt_unreg_sess+0x1c6/0x380 [qla2xxx_scst]
   qla24xx_delete_sess_fn+0xe6/0xf0 [qla2xxx_scst]
   process_one_work+0x511/0xa80
   worker_thread+0x67/0x5b0
   kthread+0x1d2/0x1f0
   ret_from_fork+0x3a/0x50

stack backtrace:
CPU: 2 PID: 62 Comm: kworker/2:1 Tainted: G           O      5.0.7-dbg+ #8
Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
Workqueue: qla2xxx_wq qla24xx_delete_sess_fn [qla2xxx_scst]
Call Trace:
 dump_stack+0x86/0xca
 check_usage.cold.52+0x473/0x563
 __lock_acquire+0x11c0/0x23e0
 lock_acquire+0xe3/0x200
 _raw_spin_lock_irqsave+0x3d/0x60
 klist_next+0x43/0x1d0
 device_for_each_child+0x96/0x110
 scsi_target_block+0x3c/0x40 [scsi_mod]
 fc_remote_port_delete+0xe7/0x1c0 [scsi_transport_fc]
 qla2x00_mark_device_lost+0xa0b/0xa30 [qla2xxx_scst]
 qlt_unreg_sess+0x1c6/0x380 [qla2xxx_scst]
 qla24xx_delete_sess_fn+0xe6/0xf0 [qla2xxx_scst]
 process_one_work+0x511/0xa80
 worker_thread+0x67/0x5b0
 kthread+0x1d2/0x1f0
 ret_from_fork+0x3a/0x50
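
The pattern the patch switches to: drop the session reference only after
sess_lock has been released, because the release callback re-acquires the
lock on its own (as qlt_unreg_sess() does) and eventually calls
fc_remote_port_delete(), which must run with interrupts enabled. Below is a
minimal sketch of that pattern, using hypothetical structure and function
names rather than the driver's real ones:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/spinlock.h>

/* Hypothetical, simplified stand-in for the driver's session object. */
struct example_sess {
	struct kref kref;
	spinlock_t *sess_lock;		/* adapter-wide lock */
};

/* Release callback: takes sess_lock on its own, like qlt_unreg_sess(). */
static void example_sess_release(struct kref *kref)
{
	struct example_sess *sess =
		container_of(kref, struct example_sess, kref);
	unsigned long flags;

	spin_lock_irqsave(sess->sess_lock, flags);
	/* ... unlink the session from the adapter's lists ... */
	spin_unlock_irqrestore(sess->sess_lock, flags);

	/* May now call code that requires interrupts to be enabled. */
}

static void example_put_sess(struct example_sess *sess)
{
	unsigned long flags;

	spin_lock_irqsave(sess->sess_lock, flags);
	/* ... per-session state changes that need the lock ... */
	spin_unlock_irqrestore(sess->sess_lock, flags);

	/* Drop the reference only after the lock has been released. */
	kref_put(&sess->kref, example_sess_release);
}

Since the final kref_put() may invoke the release callback synchronously,
holding sess_lock (a HARDIRQ-safe lock) across it would recreate exactly the
lock-order inversion that lockdep flags above.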

Cc: Himanshu Madhani <hmadhani@marvell.com>
Cc: Giridhar Malavali <gmalavali@marvell.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: Himanshu Madhani <hmadhani@marvell.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent db4bf822
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -682,7 +682,6 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
 void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
 {
 	fc_port_t *t;
-	unsigned long flags;
 
 	switch (e->u.nack.type) {
 	case SRB_NACK_PRLI:
@@ -695,10 +694,8 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
 		if (t) {
 			ql_log(ql_log_info, vha, 0xd034,
 			    "%s create sess success %p", __func__, t);
-			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
 			/* create sess has an extra kref */
 			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
-			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 		}
 		break;
 	}
@@ -710,9 +707,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work)
 {
 	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
 	struct qla_hw_data *ha = fcport->vha->hw;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 
 	if (fcport->se_sess) {
 		ha->tgt.tgt_ops->shutdown_sess(fcport);
@@ -720,7 +714,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work)
 	} else {
 		qlt_unreg_sess(fcport);
 	}
-	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 }
 
 /*
@@ -789,8 +782,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 		    fcport->port_name, sess->loop_id);
 		sess->local = 0;
 	}
-	ha->tgt.tgt_ops->put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+	ha->tgt.tgt_ops->put_sess(sess);
 }
 
 /*
@@ -4175,9 +4169,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
 	/*
 	 * Drop extra session reference from qlt_handle_cmd_for_atio().
 	 */
-	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 	ha->tgt.tgt_ops->put_sess(sess);
-	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 	return;
 
 out_term:
@@ -4194,9 +4186,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
 	target_free_tag(sess->se_sess, &cmd->se_cmd);
 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
 
-	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 	ha->tgt.tgt_ops->put_sess(sess);
-	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 }
 
 static void qlt_do_work(struct work_struct *work)
@@ -4405,9 +4395,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 	if (!cmd) {
 		ql_dbg(ql_dbg_io, vha, 0x3062,
 		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
-		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
 		ha->tgt.tgt_ops->put_sess(sess);
-		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 		return -EBUSY;
 	}
 
@@ -6258,17 +6246,19 @@ static void qlt_abort_work(struct qla_tgt *tgt,
 	}
 
 	rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
-	ha->tgt.tgt_ops->put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 
+	ha->tgt.tgt_ops->put_sess(sess);
+
 	if (rc != 0)
 		goto out_term;
 	return;
 
 out_term2:
+	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
 	if (sess)
 		ha->tgt.tgt_ops->put_sess(sess);
-	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
 
 out_term:
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -6326,9 +6316,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
 	    scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
 
 	rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
-	ha->tgt.tgt_ops->put_sess(sess);
 	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
 
+	ha->tgt.tgt_ops->put_sess(sess);
+
 	if (rc != 0)
 		goto out_term;
 	return;
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -340,7 +340,6 @@ static void tcm_qla2xxx_put_sess(struct fc_port *sess)
 	if (!sess)
 		return;
 
-	assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
 	kref_put(&sess->sess_kref, tcm_qla2xxx_release_session);
 }
 
@@ -798,7 +797,6 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess)
 
 static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess)
 {
-	assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
 	target_sess_cmd_list_set_waiting(sess->se_sess);
 }