Commit 28638887 authored by Roland Dreier, committed by Nicholas Bellinger

target: Convert acl_node_lock to be IRQ-disabling

With qla2xxx, acl_node_lock is taken inside qla2xxx's hardware_lock,
which is taken in hardirq context.  This means acl_node_lock must become
an IRQ-disabling lock; in particular this fixes lockdep warnings along
the lines of

    ======================================================
    [ INFO: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected ]

     (&(&se_tpg->acl_node_lock)->rlock){+.....}, at: [<ffffffffa026f872>] transport_deregister_session+0x92/0x140 [target_core_mod]

    and this task is already holding:
     (&(&ha->hardware_lock)->rlock){-.-...}, at: [<ffffffffa017c5e7>] qla_tgt_stop_phase1+0x57/0x2c0 [qla2xxx]
    which would create a new lock dependency:
     (&(&ha->hardware_lock)->rlock){-.-...} -> (&(&se_tpg->acl_node_lock)->rlock){+.....}

    but this new dependency connects a HARDIRQ-irq-safe lock:
     (&(&ha->hardware_lock)->rlock){-.-...}

    to a HARDIRQ-irq-unsafe lock:
     (&(&se_tpg->acl_node_lock)->rlock){+.....}
Signed-off-by: Roland Dreier <roland@purestorage.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent e63a8e19
...@@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) ...@@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
struct se_dev_entry *deve; struct se_dev_entry *deve;
u32 i; u32 i;
spin_lock_bh(&tpg->acl_node_lock); spin_lock_irq(&tpg->acl_node_lock);
list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
spin_lock_irq(&nacl->device_list_lock); spin_lock_irq(&nacl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
...@@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) ...@@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
} }
spin_unlock_irq(&nacl->device_list_lock); spin_unlock_irq(&nacl->device_list_lock);
spin_lock_bh(&tpg->acl_node_lock); spin_lock_irq(&tpg->acl_node_lock);
} }
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
} }
static struct se_port *core_alloc_port(struct se_device *dev) static struct se_port *core_alloc_port(struct se_device *dev)
...@@ -1372,17 +1372,17 @@ struct se_lun *core_dev_add_lun( ...@@ -1372,17 +1372,17 @@ struct se_lun *core_dev_add_lun(
*/ */
if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
struct se_node_acl *acl; struct se_node_acl *acl;
spin_lock_bh(&tpg->acl_node_lock); spin_lock_irq(&tpg->acl_node_lock);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (acl->dynamic_node_acl && if (acl->dynamic_node_acl &&
(!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only || (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) { !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
core_tpg_add_node_to_devs(acl, tpg); core_tpg_add_node_to_devs(acl, tpg);
spin_lock_bh(&tpg->acl_node_lock); spin_lock_irq(&tpg->acl_node_lock);
} }
} }
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
} }
return lun_p; return lun_p;
......
...@@ -1598,14 +1598,14 @@ static int core_scsi3_decode_spec_i_port( ...@@ -1598,14 +1598,14 @@ static int core_scsi3_decode_spec_i_port(
* from the decoded fabric module specific TransportID * from the decoded fabric module specific TransportID
* at *i_str. * at *i_str.
*/ */
spin_lock_bh(&tmp_tpg->acl_node_lock); spin_lock_irq(&tmp_tpg->acl_node_lock);
dest_node_acl = __core_tpg_get_initiator_node_acl( dest_node_acl = __core_tpg_get_initiator_node_acl(
tmp_tpg, i_str); tmp_tpg, i_str);
if (dest_node_acl) { if (dest_node_acl) {
atomic_inc(&dest_node_acl->acl_pr_ref_count); atomic_inc(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_inc(); smp_mb__after_atomic_inc();
} }
spin_unlock_bh(&tmp_tpg->acl_node_lock); spin_unlock_irq(&tmp_tpg->acl_node_lock);
if (!dest_node_acl) { if (!dest_node_acl) {
core_scsi3_tpg_undepend_item(tmp_tpg); core_scsi3_tpg_undepend_item(tmp_tpg);
...@@ -3496,14 +3496,14 @@ static int core_scsi3_emulate_pro_register_and_move( ...@@ -3496,14 +3496,14 @@ static int core_scsi3_emulate_pro_register_and_move(
/* /*
* Locate the destination struct se_node_acl from the received Transport ID * Locate the destination struct se_node_acl from the received Transport ID
*/ */
spin_lock_bh(&dest_se_tpg->acl_node_lock); spin_lock_irq(&dest_se_tpg->acl_node_lock);
dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
initiator_str); initiator_str);
if (dest_node_acl) { if (dest_node_acl) {
atomic_inc(&dest_node_acl->acl_pr_ref_count); atomic_inc(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_inc(); smp_mb__after_atomic_inc();
} }
spin_unlock_bh(&dest_se_tpg->acl_node_lock); spin_unlock_irq(&dest_se_tpg->acl_node_lock);
if (!dest_node_acl) { if (!dest_node_acl) {
pr_err("Unable to locate %s dest_node_acl for" pr_err("Unable to locate %s dest_node_acl for"
......
...@@ -137,15 +137,15 @@ struct se_node_acl *core_tpg_get_initiator_node_acl( ...@@ -137,15 +137,15 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
{ {
struct se_node_acl *acl; struct se_node_acl *acl;
spin_lock_bh(&tpg->acl_node_lock); spin_lock_irq(&tpg->acl_node_lock);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (!strcmp(acl->initiatorname, initiatorname) && if (!strcmp(acl->initiatorname, initiatorname) &&
!acl->dynamic_node_acl) { !acl->dynamic_node_acl) {
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
return acl; return acl;
} }
} }
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
return NULL; return NULL;
} }
...@@ -309,10 +309,10 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( ...@@ -309,10 +309,10 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
else else
core_tpg_add_node_to_devs(acl, tpg); core_tpg_add_node_to_devs(acl, tpg);
spin_lock_bh(&tpg->acl_node_lock); spin_lock_irq(&tpg->acl_node_lock);
list_add_tail(&acl->acl_list, &tpg->acl_node_list); list_add_tail(&acl->acl_list, &tpg->acl_node_list);
tpg->num_node_acls++; tpg->num_node_acls++;
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
...@@ -362,7 +362,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( ...@@ -362,7 +362,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
{ {
struct se_node_acl *acl = NULL; struct se_node_acl *acl = NULL;
spin_lock_bh(&tpg->acl_node_lock); spin_lock_irq(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (acl) { if (acl) {
if (acl->dynamic_node_acl) { if (acl->dynamic_node_acl) {
...@@ -370,7 +370,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( ...@@ -370,7 +370,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
pr_debug("%s_TPG[%u] - Replacing dynamic ACL" pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
" for %s\n", tpg->se_tpg_tfo->get_fabric_name(), " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
/* /*
* Release the locally allocated struct se_node_acl * Release the locally allocated struct se_node_acl
* because * core_tpg_add_initiator_node_acl() returned * because * core_tpg_add_initiator_node_acl() returned
...@@ -386,10 +386,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( ...@@ -386,10 +386,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
" Node %s already exists for TPG %u, ignoring" " Node %s already exists for TPG %u, ignoring"
" request.\n", tpg->se_tpg_tfo->get_fabric_name(), " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
return ERR_PTR(-EEXIST); return ERR_PTR(-EEXIST);
} }
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
if (!se_nacl) { if (!se_nacl) {
pr_err("struct se_node_acl pointer is NULL\n"); pr_err("struct se_node_acl pointer is NULL\n");
...@@ -426,10 +426,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( ...@@ -426,10 +426,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
spin_lock_bh(&tpg->acl_node_lock); spin_lock_irq(&tpg->acl_node_lock);
list_add_tail(&acl->acl_list, &tpg->acl_node_list); list_add_tail(&acl->acl_list, &tpg->acl_node_list);
tpg->num_node_acls++; tpg->num_node_acls++;
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
done: done:
pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
...@@ -453,14 +453,14 @@ int core_tpg_del_initiator_node_acl( ...@@ -453,14 +453,14 @@ int core_tpg_del_initiator_node_acl(
struct se_session *sess, *sess_tmp; struct se_session *sess, *sess_tmp;
int dynamic_acl = 0; int dynamic_acl = 0;
spin_lock_bh(&tpg->acl_node_lock); spin_lock_irq(&tpg->acl_node_lock);
if (acl->dynamic_node_acl) { if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0; acl->dynamic_node_acl = 0;
dynamic_acl = 1; dynamic_acl = 1;
} }
list_del(&acl->acl_list); list_del(&acl->acl_list);
tpg->num_node_acls--; tpg->num_node_acls--;
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
spin_lock_bh(&tpg->session_lock); spin_lock_bh(&tpg->session_lock);
list_for_each_entry_safe(sess, sess_tmp, list_for_each_entry_safe(sess, sess_tmp,
...@@ -511,21 +511,21 @@ int core_tpg_set_initiator_node_queue_depth( ...@@ -511,21 +511,21 @@ int core_tpg_set_initiator_node_queue_depth(
struct se_node_acl *acl; struct se_node_acl *acl;
int dynamic_acl = 0; int dynamic_acl = 0;
spin_lock_bh(&tpg->acl_node_lock); spin_lock_irq(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (!acl) { if (!acl) {
pr_err("Access Control List entry for %s Initiator" pr_err("Access Control List entry for %s Initiator"
" Node %s does not exists for TPG %hu, ignoring" " Node %s does not exists for TPG %hu, ignoring"
" request.\n", tpg->se_tpg_tfo->get_fabric_name(), " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
return -ENODEV; return -ENODEV;
} }
if (acl->dynamic_node_acl) { if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0; acl->dynamic_node_acl = 0;
dynamic_acl = 1; dynamic_acl = 1;
} }
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
spin_lock_bh(&tpg->session_lock); spin_lock_bh(&tpg->session_lock);
list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
...@@ -541,10 +541,10 @@ int core_tpg_set_initiator_node_queue_depth( ...@@ -541,10 +541,10 @@ int core_tpg_set_initiator_node_queue_depth(
tpg->se_tpg_tfo->get_fabric_name(), initiatorname); tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
spin_unlock_bh(&tpg->session_lock); spin_unlock_bh(&tpg->session_lock);
spin_lock_bh(&tpg->acl_node_lock); spin_lock_irq(&tpg->acl_node_lock);
if (dynamic_acl) if (dynamic_acl)
acl->dynamic_node_acl = 1; acl->dynamic_node_acl = 1;
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
return -EEXIST; return -EEXIST;
} }
/* /*
...@@ -579,10 +579,10 @@ int core_tpg_set_initiator_node_queue_depth( ...@@ -579,10 +579,10 @@ int core_tpg_set_initiator_node_queue_depth(
if (init_sess) if (init_sess)
tpg->se_tpg_tfo->close_session(init_sess); tpg->se_tpg_tfo->close_session(init_sess);
spin_lock_bh(&tpg->acl_node_lock); spin_lock_irq(&tpg->acl_node_lock);
if (dynamic_acl) if (dynamic_acl)
acl->dynamic_node_acl = 1; acl->dynamic_node_acl = 1;
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
return -EINVAL; return -EINVAL;
} }
spin_unlock_bh(&tpg->session_lock); spin_unlock_bh(&tpg->session_lock);
...@@ -598,10 +598,10 @@ int core_tpg_set_initiator_node_queue_depth( ...@@ -598,10 +598,10 @@ int core_tpg_set_initiator_node_queue_depth(
initiatorname, tpg->se_tpg_tfo->get_fabric_name(), initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg)); tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_lock_bh(&tpg->acl_node_lock); spin_lock_irq(&tpg->acl_node_lock);
if (dynamic_acl) if (dynamic_acl)
acl->dynamic_node_acl = 1; acl->dynamic_node_acl = 1;
spin_unlock_bh(&tpg->acl_node_lock); spin_unlock_irq(&tpg->acl_node_lock);
return 0; return 0;
} }
...@@ -725,20 +725,20 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) ...@@ -725,20 +725,20 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
* not been released because of TFO->tpg_check_demo_mode_cache() == 1 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
* in transport_deregister_session(). * in transport_deregister_session().
*/ */
spin_lock_bh(&se_tpg->acl_node_lock); spin_lock_irq(&se_tpg->acl_node_lock);
list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
acl_list) { acl_list) {
list_del(&nacl->acl_list); list_del(&nacl->acl_list);
se_tpg->num_node_acls--; se_tpg->num_node_acls--;
spin_unlock_bh(&se_tpg->acl_node_lock); spin_unlock_irq(&se_tpg->acl_node_lock);
core_tpg_wait_for_nacl_pr_ref(nacl); core_tpg_wait_for_nacl_pr_ref(nacl);
core_free_device_list_for_node(nacl, se_tpg); core_free_device_list_for_node(nacl, se_tpg);
se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
spin_lock_bh(&se_tpg->acl_node_lock); spin_lock_irq(&se_tpg->acl_node_lock);
} }
spin_unlock_bh(&se_tpg->acl_node_lock); spin_unlock_irq(&se_tpg->acl_node_lock);
if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
core_tpg_release_virtual_lun0(se_tpg); core_tpg_release_virtual_lun0(se_tpg);
......
...@@ -256,7 +256,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) ...@@ -256,7 +256,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
struct se_portal_group *se_tpg = &tpg->se_tpg; struct se_portal_group *se_tpg = &tpg->se_tpg;
struct se_node_acl *se_acl; struct se_node_acl *se_acl;
spin_lock_bh(&se_tpg->acl_node_lock); spin_lock_irq(&se_tpg->acl_node_lock);
list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
acl = container_of(se_acl, struct ft_node_acl, se_node_acl); acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
pr_debug("acl %p port_name %llx\n", pr_debug("acl %p port_name %llx\n",
...@@ -270,7 +270,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) ...@@ -270,7 +270,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
break; break;
} }
} }
spin_unlock_bh(&se_tpg->acl_node_lock); spin_unlock_irq(&se_tpg->acl_node_lock);
return found; return found;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment