Commit 50759b88 authored by Bart Van Assche's avatar Bart Van Assche Committed by Martin K. Petersen

scsi: device_handler: alua: Call scsi_device_put() from non-atomic context

Since commit f93ed747 ("scsi: core: Release SCSI devices
synchronously"), scsi_device_put() might sleep. Avoid calling it from
alua_rtpg_queue() with the pg_lock held. The lock only protects h->pg,
anyway. To avoid the pg being freed under us, because of a race with
another thread, take a temporary reference. In alua_rtpg_queue(), verify
that the pg still belongs to the sdev being passed before actually queueing
the RTPG.

This patch fixes the following smatch warning:

drivers/scsi/device_handler/scsi_dh_alua.c:1013 alua_rtpg_queue() warn: sleeping in atomic context

alua_check_vpd() <- disables preempt
-> alua_rtpg_queue()
   -> scsi_device_put()

Cc: Martin Wilck <mwilck@suse.com>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Sachin Sant <sachinp@linux.ibm.com>
Cc: Benjamin Block <bblock@linux.ibm.com>
Suggested-by: default avatarMartin Wilck <mwilck@suse.com>
Reported-by: default avatarDan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: default avatarBart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20221117183626.2656196-3-bvanassche@acm.org
Tested-by: default avatarSachin Sant <sachinp@linux.ibm.com>
Signed-off-by: default avatarMartin K. Petersen <martin.petersen@oracle.com>
parent a500c4cc
...@@ -354,6 +354,8 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, ...@@ -354,6 +354,8 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
"%s: port group %x rel port %x\n", "%s: port group %x rel port %x\n",
ALUA_DH_NAME, group_id, rel_port); ALUA_DH_NAME, group_id, rel_port);
kref_get(&pg->kref);
/* Check for existing port group references */ /* Check for existing port group references */
spin_lock(&h->pg_lock); spin_lock(&h->pg_lock);
old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
...@@ -373,11 +375,11 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, ...@@ -373,11 +375,11 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
list_add_rcu(&h->node, &pg->dh_list); list_add_rcu(&h->node, &pg->dh_list);
spin_unlock_irqrestore(&pg->lock, flags); spin_unlock_irqrestore(&pg->lock, flags);
alua_rtpg_queue(rcu_dereference_protected(h->pg,
lockdep_is_held(&h->pg_lock)),
sdev, NULL, true);
spin_unlock(&h->pg_lock); spin_unlock(&h->pg_lock);
alua_rtpg_queue(pg, sdev, NULL, true);
kref_put(&pg->kref, release_port_group);
if (old_pg) if (old_pg)
kref_put(&old_pg->kref, release_port_group); kref_put(&old_pg->kref, release_port_group);
...@@ -986,6 +988,9 @@ static bool alua_rtpg_queue(struct alua_port_group *pg, ...@@ -986,6 +988,9 @@ static bool alua_rtpg_queue(struct alua_port_group *pg,
{ {
int start_queue = 0; int start_queue = 0;
unsigned long flags; unsigned long flags;
might_sleep();
if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev)) if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
return false; return false;
...@@ -996,11 +1001,17 @@ static bool alua_rtpg_queue(struct alua_port_group *pg, ...@@ -996,11 +1001,17 @@ static bool alua_rtpg_queue(struct alua_port_group *pg,
force = true; force = true;
} }
if (pg->rtpg_sdev == NULL) { if (pg->rtpg_sdev == NULL) {
struct alua_dh_data *h = sdev->handler_data;
rcu_read_lock();
if (h && rcu_dereference(h->pg) == pg) {
pg->interval = 0; pg->interval = 0;
pg->flags |= ALUA_PG_RUN_RTPG; pg->flags |= ALUA_PG_RUN_RTPG;
kref_get(&pg->kref); kref_get(&pg->kref);
pg->rtpg_sdev = sdev; pg->rtpg_sdev = sdev;
start_queue = 1; start_queue = 1;
}
rcu_read_unlock();
} else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) { } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
pg->flags |= ALUA_PG_RUN_RTPG; pg->flags |= ALUA_PG_RUN_RTPG;
/* Do not queue if the worker is already running */ /* Do not queue if the worker is already running */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment